1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
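/* Roughly why this works (an illustrative note, not load-bearing here): in
ELF64 r_info the symbol index occupies the upper 32 bits and the relocation
type the lower 32 bits, and every x86-64 relocation type value fits in one
byte, so ELF32_R_TYPE (which keeps the low byte) and ELF64_R_TYPE (which
keeps the low 32 bits) give the same answer for these relocations. */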
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
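/* For illustration, reading the R_X86_64_PC32 entry below against that field
order: rightshift 0, size 2 (a 4-byte field in BFD's encoding), bitsize 32,
pc_relative TRUE, bitpos 0, complain_overflow_signed, bfd_elf_generic_reloc,
the name string, partial_inplace FALSE, src_mask/dst_mask 0xffffffff, and
pcrel_offset TRUE. */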
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
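/* A worked example of the index arithmetic, assuming the usual reloc
numbering (R_X86_64_REX_GOTPCRELX = 42, R_X86_64_GNU_VTINHERIT = 250,
R_X86_64_GNU_VTENTRY = 251): R_X86_64_standard is 43 and R_X86_64_vt_offset
is 250 - 43 = 207, so the two vtable relocs map to table indices 43 and 44,
i.e. the next two entries below. */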
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
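/* Sketch of how this bit is used later in this file: when a GOTPCREL load is
rewritten, the new type is stored as (r_type | R_X86_64_converted_reloc_bit),
and readers strip it again with (type & ~R_X86_64_converted_reloc_bit) before
using the type. */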
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some implementations (at least
428 one, anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
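/* For context (the standard GNU/Linux convention, assumed rather than spelled
out here): the dynamic linker fills GOT+8 with its link-map argument and
GOT+16 with the address of its lazy-resolver entry point, which is why PLT0
pushes the former and jumps through the latter. */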
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transit to a different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transit to a different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq %r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq %rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transit to a different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq %r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq %rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
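/* For example (a sketch of the common cases handled below): when linking an
executable, R_X86_64_TLSGD against a symbol with no hash entry (a local
symbol) is relaxed to R_X86_64_TPOFF32, against a global symbol it first
becomes R_X86_64_GOTTPOFF and may be relaxed further when called from
relocate_section, and R_X86_64_TLSLD becomes R_X86_64_TPOFF32. */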
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = NULL;
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = NULL;
1436 }
1437
1438 if (bfd_link_dll (info))
1439 {
1440 object = _("a shared object");
1441 if (!pic)
1442 pic = _("; recompile with -fPIC");
1443 }
1444 else
1445 {
1446 if (bfd_link_pie (info))
1447 object = _("a PIE object");
1448 else
1449 object = _("a PDE object");
1450 if (!pic)
1451 pic = _("; recompile with -fPIE");
1452 }
1453
1454 /* xgettext:c-format */
1455 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1456 "not be used when making %s%s"),
1457 input_bfd, howto->name, und, v, name,
1458 object, pic);
1459 bfd_set_error (bfd_error_bad_value);
1460 sec->check_relocs_failed = 1;
1461 return FALSE;
1462 }
1463
1464 /* With the local symbol, foo, we convert
1465 mov foo@GOTPCREL(%rip), %reg
1466 to
1467 lea foo(%rip), %reg
1468 and convert
1469 call/jmp *foo@GOTPCREL(%rip)
1470 to
1471 nop call foo/jmp foo nop
1472 When PIC is false, convert
1473 test %reg, foo@GOTPCREL(%rip)
1474 to
1475 test $foo, %reg
1476 and convert
1477 binop foo@GOTPCREL(%rip), %reg
1478 to
1479 binop $foo, %reg
1480 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1481 instructions. */
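/* A byte-level sketch of the first rewrite above, using the standard
encodings (the disp32 bytes are the relocation field):
48 8b 05 <disp32> mov foo@GOTPCREL(%rip), %rax
becomes
48 8d 05 <disp32> lea foo(%rip), %rax
so only the opcode byte at r_offset - 2 changes (0x8b -> 0x8d) and the
relocation type becomes R_X86_64_PC32. */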
1482
1483 static bfd_boolean
1484 elf_x86_64_convert_load_reloc (bfd *abfd,
1485 bfd_byte *contents,
1486 unsigned int *r_type_p,
1487 Elf_Internal_Rela *irel,
1488 struct elf_link_hash_entry *h,
1489 bfd_boolean *converted,
1490 struct bfd_link_info *link_info)
1491 {
1492 struct elf_x86_link_hash_table *htab;
1493 bfd_boolean is_pic;
1494 bfd_boolean no_overflow;
1495 bfd_boolean relocx;
1496 bfd_boolean to_reloc_pc32;
1497 asection *tsec;
1498 bfd_signed_vma raddend;
1499 unsigned int opcode;
1500 unsigned int modrm;
1501 unsigned int r_type = *r_type_p;
1502 unsigned int r_symndx;
1503 bfd_vma roff = irel->r_offset;
1504
1505 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1506 return TRUE;
1507
1508 raddend = irel->r_addend;
1509 /* Addend for 32-bit PC-relative relocation must be -4. */
1510 if (raddend != -4)
1511 return TRUE;
1512
1513 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1514 is_pic = bfd_link_pic (link_info);
1515
1516 relocx = (r_type == R_X86_64_GOTPCRELX
1517 || r_type == R_X86_64_REX_GOTPCRELX);
1518
1519 /* TRUE if --no-relax is used. */
1520 no_overflow = link_info->disable_target_specific_optimizations > 1;
1521
1522 r_symndx = htab->r_sym (irel->r_info);
1523
1524 opcode = bfd_get_8 (abfd, contents + roff - 2);
1525
1526 /* Convert mov to lea even for plain R_X86_64_GOTPCREL, since that conversion has been done for a while. */
1527 if (opcode != 0x8b)
1528 {
1529 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1530 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1531 test, xor instructions. */
1532 if (!relocx)
1533 return TRUE;
1534 }
1535
1536 /* We convert only to R_X86_64_PC32:
1537 1. Branch.
1538 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1539 3. no_overflow is true.
1540 4. PIC.
1541 */
1542 to_reloc_pc32 = (opcode == 0xff
1543 || !relocx
1544 || no_overflow
1545 || is_pic);
1546
1547 /* Get the symbol referred to by the reloc. */
1548 if (h == NULL)
1549 {
1550 Elf_Internal_Sym *isym
1551 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1552
1553 /* Skip relocation against undefined symbols. */
1554 if (isym->st_shndx == SHN_UNDEF)
1555 return TRUE;
1556
1557 if (isym->st_shndx == SHN_ABS)
1558 tsec = bfd_abs_section_ptr;
1559 else if (isym->st_shndx == SHN_COMMON)
1560 tsec = bfd_com_section_ptr;
1561 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1562 tsec = &_bfd_elf_large_com_section;
1563 else
1564 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1565 }
1566 else
1567 {
1568 /* An undefined weak symbol is only bound locally in an executable
1569 and its reference is resolved as 0 without relocation
1570 overflow. We can only perform this optimization for
1571 GOTPCRELX relocations since we need to modify the REX byte.
1572 It is OK to convert mov with R_X86_64_GOTPCREL to
1573 R_X86_64_PC32. */
1574 bfd_boolean local_ref;
1575 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1576
1577 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1578 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1579 if ((relocx || opcode == 0x8b)
1580 && (h->root.type == bfd_link_hash_undefweak
1581 && !eh->linker_def
1582 && local_ref))
1583 {
1584 if (opcode == 0xff)
1585 {
1586 /* Skip for branch instructions since R_X86_64_PC32
1587 may overflow. */
1588 if (no_overflow)
1589 return TRUE;
1590 }
1591 else if (relocx)
1592 {
1593 /* For non-branch instructions, we can convert to
1594 R_X86_64_32/R_X86_64_32S since we know if there
1595 is a REX byte. */
1596 to_reloc_pc32 = FALSE;
1597 }
1598
1599 /* Since we don't know the current PC when PIC is true,
1600 we can't convert to R_X86_64_PC32. */
1601 if (to_reloc_pc32 && is_pic)
1602 return TRUE;
1603
1604 goto convert;
1605 }
1606 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1607 ld.so may use its link-time address. */
1608 else if (h->start_stop
1609 || eh->linker_def
1610 || ((h->def_regular
1611 || h->root.type == bfd_link_hash_defined
1612 || h->root.type == bfd_link_hash_defweak)
1613 && h != htab->elf.hdynamic
1614 && local_ref))
1615 {
1616 /* bfd_link_hash_new or bfd_link_hash_undefined is
1617 set by an assignment in a linker script in
1618 bfd_elf_record_link_assignment. start_stop is set
1619 on __start_SECNAME/__stop_SECNAME which mark section
1620 SECNAME. */
1621 if (h->start_stop
1622 || eh->linker_def
1623 || (h->def_regular
1624 && (h->root.type == bfd_link_hash_new
1625 || h->root.type == bfd_link_hash_undefined
1626 || ((h->root.type == bfd_link_hash_defined
1627 || h->root.type == bfd_link_hash_defweak)
1628 && h->root.u.def.section == bfd_und_section_ptr))))
1629 {
1630 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1631 if (no_overflow)
1632 return TRUE;
1633 goto convert;
1634 }
1635 tsec = h->root.u.def.section;
1636 }
1637 else
1638 return TRUE;
1639 }
1640
1641 /* Don't convert a GOTPCREL relocation against a large section. */
1642 if (elf_section_data (tsec) != NULL
1643 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1644 return TRUE;
1645
1646 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1647 if (no_overflow)
1648 return TRUE;
1649
1650 convert:
1651 if (opcode == 0xff)
1652 {
1653 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1654 unsigned int nop;
1655 unsigned int disp;
1656 bfd_vma nop_offset;
1657
1658 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1659 R_X86_64_PC32. */
1660 modrm = bfd_get_8 (abfd, contents + roff - 1);
1661 if (modrm == 0x25)
1662 {
1663 /* Convert to "jmp foo nop". */
1664 modrm = 0xe9;
1665 nop = NOP_OPCODE;
1666 nop_offset = irel->r_offset + 3;
1667 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1668 irel->r_offset -= 1;
1669 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1670 }
1671 else
1672 {
1673 struct elf_x86_link_hash_entry *eh
1674 = (struct elf_x86_link_hash_entry *) h;
1675
1676 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1677 is a nop prefix. */
1678 modrm = 0xe8;
1679 /* To support TLS optimization, always use addr32 prefix for
1680 "call *__tls_get_addr@GOTPCREL(%rip)". */
1681 if (eh && eh->tls_get_addr)
1682 {
1683 nop = 0x67;
1684 nop_offset = irel->r_offset - 2;
1685 }
1686 else
1687 {
1688 nop = htab->params->call_nop_byte;
1689 if (htab->params->call_nop_as_suffix)
1690 {
1691 nop_offset = irel->r_offset + 3;
1692 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1693 irel->r_offset -= 1;
1694 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1695 }
1696 else
1697 nop_offset = irel->r_offset - 2;
1698 }
1699 }
1700 bfd_put_8 (abfd, nop, contents + nop_offset);
1701 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1702 r_type = R_X86_64_PC32;
1703 }
1704 else
1705 {
1706 unsigned int rex;
1707 unsigned int rex_mask = REX_R;
1708
1709 if (r_type == R_X86_64_REX_GOTPCRELX)
1710 rex = bfd_get_8 (abfd, contents + roff - 3);
1711 else
1712 rex = 0;
1713
1714 if (opcode == 0x8b)
1715 {
1716 if (to_reloc_pc32)
1717 {
1718 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1719 "lea foo(%rip), %reg". */
1720 opcode = 0x8d;
1721 r_type = R_X86_64_PC32;
1722 }
1723 else
1724 {
1725 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1726 "mov $foo, %reg". */
1727 opcode = 0xc7;
1728 modrm = bfd_get_8 (abfd, contents + roff - 1);
1729 modrm = 0xc0 | (modrm & 0x38) >> 3;
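		  /* For example, "mov foo@GOTPCREL(%rip), %rcx" has ModRM
		     byte 0x0d (mod=00, reg=001, rm=101); the line above
		     rewrites it to 0xc1, moving the register from the reg
		     field into the r/m field of the C7 /0
		     "mov $imm32, %reg" form.  */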
1730 if ((rex & REX_W) != 0
1731 && ABI_64_P (link_info->output_bfd))
1732 {
1733 /* Keep the REX_W bit in REX byte for LP64. */
1734 r_type = R_X86_64_32S;
1735 goto rewrite_modrm_rex;
1736 }
1737 else
1738 {
1739 /* If the REX_W bit in REX byte isn't needed,
1740 use R_X86_64_32 and clear the W bit to avoid
1741 sign-extend imm32 to imm64. */
1742 r_type = R_X86_64_32;
1743 /* Clear the W bit in REX byte. */
1744 rex_mask |= REX_W;
1745 goto rewrite_modrm_rex;
1746 }
1747 }
1748 }
1749 else
1750 {
1751 /* R_X86_64_PC32 isn't supported. */
1752 if (to_reloc_pc32)
1753 return TRUE;
1754
1755 modrm = bfd_get_8 (abfd, contents + roff - 1);
1756 if (opcode == 0x85)
1757 {
1758 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1759 "test $foo, %reg". */
1760 modrm = 0xc0 | (modrm & 0x38) >> 3;
1761 opcode = 0xf7;
1762 }
1763 else
1764 {
1765 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1766 "binop $foo, %reg". */
1767 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1768 opcode = 0x81;
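		  /* NB: the 0x81 immediate group encodes the operation in
		     the ModRM reg field; ORing in (opcode & 0x3c) copies
		     those bits from the original two-operand opcode, e.g.
		     add (0x03) -> 81 /0, sub (0x2b) -> 81 /5,
		     cmp (0x3b) -> 81 /7.  */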
1769 }
1770
1771 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1772 overflow when sign-extending imm32 to imm64. */
1773 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1774
1775 rewrite_modrm_rex:
1776 bfd_put_8 (abfd, modrm, contents + roff - 1);
1777
1778 if (rex)
1779 {
1780 /* Move the R bit to the B bit in REX byte. */
1781 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
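		    /* NB: the destination register was extended by REX.R
		       (bit 2) while it sat in ModRM.reg; in the rewritten
		       immediate form it sits in ModRM.rm, which REX.B
		       (bit 0) extends, hence the shift by 2.  */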
1782 bfd_put_8 (abfd, rex, contents + roff - 3);
1783 }
1784
1785 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1786 irel->r_addend = 0;
1787 }
1788
1789 bfd_put_8 (abfd, opcode, contents + roff - 2);
1790 }
1791
1792 *r_type_p = r_type;
1793 irel->r_info = htab->r_info (r_symndx,
1794 r_type | R_X86_64_converted_reloc_bit);
1795
1796 *converted = TRUE;
1797
1798 return TRUE;
1799 }
1800
1801 /* Look through the relocs for a section during the first phase, and
1802 calculate needed space in the global offset table, procedure
1803 linkage table, and dynamic reloc sections. */
1804
1805 static bfd_boolean
1806 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1807 asection *sec,
1808 const Elf_Internal_Rela *relocs)
1809 {
1810 struct elf_x86_link_hash_table *htab;
1811 Elf_Internal_Shdr *symtab_hdr;
1812 struct elf_link_hash_entry **sym_hashes;
1813 const Elf_Internal_Rela *rel;
1814 const Elf_Internal_Rela *rel_end;
1815 asection *sreloc;
1816 bfd_byte *contents;
1817 bfd_boolean converted;
1818
1819 if (bfd_link_relocatable (info))
1820 return TRUE;
1821
1822 /* Don't do anything special with non-loaded, non-alloced sections.
1823 In particular, any relocs in such sections should not affect GOT
1824	     and PLT reference counting (i.e. we don't allow them to create GOT
1825 or PLT entries), there's no possibility or desire to optimize TLS
1826 relocs, and there's not much point in propagating relocs to shared
1827 libs that the dynamic linker won't relocate. */
1828 if ((sec->flags & SEC_ALLOC) == 0)
1829 return TRUE;
1830
1831 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1832 if (htab == NULL)
1833 {
1834 sec->check_relocs_failed = 1;
1835 return FALSE;
1836 }
1837
1838 BFD_ASSERT (is_x86_elf (abfd, htab));
1839
1840 /* Get the section contents. */
1841 if (elf_section_data (sec)->this_hdr.contents != NULL)
1842 contents = elf_section_data (sec)->this_hdr.contents;
1843 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1844 {
1845 sec->check_relocs_failed = 1;
1846 return FALSE;
1847 }
1848
1849 symtab_hdr = &elf_symtab_hdr (abfd);
1850 sym_hashes = elf_sym_hashes (abfd);
1851
1852 converted = FALSE;
1853
1854 sreloc = NULL;
1855
1856 rel_end = relocs + sec->reloc_count;
1857 for (rel = relocs; rel < rel_end; rel++)
1858 {
1859 unsigned int r_type;
1860 unsigned int r_symndx;
1861 struct elf_link_hash_entry *h;
1862 struct elf_x86_link_hash_entry *eh;
1863 Elf_Internal_Sym *isym;
1864 const char *name;
1865 bfd_boolean size_reloc;
1866 bfd_boolean converted_reloc;
1867 bfd_boolean do_check_pic;
1868
1869 r_symndx = htab->r_sym (rel->r_info);
1870 r_type = ELF32_R_TYPE (rel->r_info);
1871
1872 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1873 {
1874 /* xgettext:c-format */
1875 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1876 abfd, r_symndx);
1877 goto error_return;
1878 }
1879
1880 if (r_symndx < symtab_hdr->sh_info)
1881 {
1882 /* A local symbol. */
1883 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1884 abfd, r_symndx);
1885 if (isym == NULL)
1886 goto error_return;
1887
1888 /* Check relocation against local STT_GNU_IFUNC symbol. */
1889 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1890 {
1891 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1892 TRUE);
1893 if (h == NULL)
1894 goto error_return;
1895
1896 /* Fake a STT_GNU_IFUNC symbol. */
1897 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1898 isym, NULL);
1899 h->type = STT_GNU_IFUNC;
1900 h->def_regular = 1;
1901 h->ref_regular = 1;
1902 h->forced_local = 1;
1903 h->root.type = bfd_link_hash_defined;
1904 }
1905 else
1906 h = NULL;
1907 }
1908 else
1909 {
1910 isym = NULL;
1911 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1912 while (h->root.type == bfd_link_hash_indirect
1913 || h->root.type == bfd_link_hash_warning)
1914 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1915 }
1916
1917 /* Check invalid x32 relocations. */
1918 if (!ABI_64_P (abfd))
1919 switch (r_type)
1920 {
1921 default:
1922 break;
1923
1924 case R_X86_64_DTPOFF64:
1925 case R_X86_64_TPOFF64:
1926 case R_X86_64_PC64:
1927 case R_X86_64_GOTOFF64:
1928 case R_X86_64_GOT64:
1929 case R_X86_64_GOTPCREL64:
1930 case R_X86_64_GOTPC64:
1931 case R_X86_64_GOTPLT64:
1932 case R_X86_64_PLTOFF64:
1933 {
1934 if (h)
1935 name = h->root.root.string;
1936 else
1937 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1938 NULL);
1939 _bfd_error_handler
1940 /* xgettext:c-format */
1941 (_("%pB: relocation %s against symbol `%s' isn't "
1942 "supported in x32 mode"), abfd,
1943 x86_64_elf_howto_table[r_type].name, name);
1944 bfd_set_error (bfd_error_bad_value);
1945 goto error_return;
1946 }
1947 break;
1948 }
1949
1950 if (h != NULL)
1951 {
1952 /* It is referenced by a non-shared object. */
1953 h->ref_regular = 1;
1954 }
1955
1956 converted_reloc = FALSE;
1957 if ((r_type == R_X86_64_GOTPCREL
1958 || r_type == R_X86_64_GOTPCRELX
1959 || r_type == R_X86_64_REX_GOTPCRELX)
1960 && (h == NULL || h->type != STT_GNU_IFUNC))
1961 {
1962 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1963 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1964 irel, h, &converted_reloc,
1965 info))
1966 goto error_return;
1967
1968 if (converted_reloc)
1969 converted = TRUE;
1970 }
1971
1972 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1973 symtab_hdr, sym_hashes,
1974 &r_type, GOT_UNKNOWN,
1975 rel, rel_end, h, r_symndx, FALSE))
1976 goto error_return;
1977
1978 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1979 if (h == htab->elf.hgot)
1980 htab->got_referenced = TRUE;
1981
1982 eh = (struct elf_x86_link_hash_entry *) h;
1983 switch (r_type)
1984 {
1985 case R_X86_64_TLSLD:
1986 htab->tls_ld_or_ldm_got.refcount = 1;
1987 goto create_got;
1988
1989 case R_X86_64_TPOFF32:
1990 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1991 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1992 &x86_64_elf_howto_table[r_type]);
1993 if (eh != NULL)
1994 eh->zero_undefweak &= 0x2;
1995 break;
1996
1997 case R_X86_64_GOTTPOFF:
1998 if (!bfd_link_executable (info))
1999 info->flags |= DF_STATIC_TLS;
2000 /* Fall through */
2001
2002 case R_X86_64_GOT32:
2003 case R_X86_64_GOTPCREL:
2004 case R_X86_64_GOTPCRELX:
2005 case R_X86_64_REX_GOTPCRELX:
2006 case R_X86_64_TLSGD:
2007 case R_X86_64_GOT64:
2008 case R_X86_64_GOTPCREL64:
2009 case R_X86_64_GOTPLT64:
2010 case R_X86_64_GOTPC32_TLSDESC:
2011 case R_X86_64_TLSDESC_CALL:
2012 /* This symbol requires a global offset table entry. */
2013 {
2014 int tls_type, old_tls_type;
2015
2016 switch (r_type)
2017 {
2018 default: tls_type = GOT_NORMAL; break;
2019 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2020 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2021 case R_X86_64_GOTPC32_TLSDESC:
2022 case R_X86_64_TLSDESC_CALL:
2023 tls_type = GOT_TLS_GDESC; break;
2024 }
2025
2026 if (h != NULL)
2027 {
2028 h->got.refcount = 1;
2029 old_tls_type = eh->tls_type;
2030 }
2031 else
2032 {
2033 bfd_signed_vma *local_got_refcounts;
2034
2035 /* This is a global offset table entry for a local symbol. */
2036 local_got_refcounts = elf_local_got_refcounts (abfd);
2037 if (local_got_refcounts == NULL)
2038 {
2039 bfd_size_type size;
2040
2041 size = symtab_hdr->sh_info;
2042 size *= sizeof (bfd_signed_vma)
2043 + sizeof (bfd_vma) + sizeof (char);
2044 local_got_refcounts = ((bfd_signed_vma *)
2045 bfd_zalloc (abfd, size));
2046 if (local_got_refcounts == NULL)
2047 goto error_return;
2048 elf_local_got_refcounts (abfd) = local_got_refcounts;
2049 elf_x86_local_tlsdesc_gotent (abfd)
2050 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2051 elf_x86_local_got_tls_type (abfd)
2052 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
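		    /* NB: the single allocation above holds three parallel
		       per-local-symbol arrays: sh_info GOT refcounts,
		       followed by sh_info TLSDESC GOT offsets, followed by
		       sh_info TLS type bytes.  */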
2053 }
2054 local_got_refcounts[r_symndx] = 1;
2055 old_tls_type
2056 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2057 }
2058
2059	    /* If a TLS symbol is accessed using IE at least once,
2060	       there is no point in using a dynamic model for it.  */
2061 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2062 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2063 || tls_type != GOT_TLS_IE))
2064 {
2065 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2066 tls_type = old_tls_type;
2067 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2068 && GOT_TLS_GD_ANY_P (tls_type))
2069 tls_type |= old_tls_type;
2070 else
2071 {
2072 if (h)
2073 name = h->root.root.string;
2074 else
2075 name = bfd_elf_sym_name (abfd, symtab_hdr,
2076 isym, NULL);
2077 _bfd_error_handler
2078 /* xgettext:c-format */
2079 (_("%pB: '%s' accessed both as normal and"
2080 " thread local symbol"),
2081 abfd, name);
2082 bfd_set_error (bfd_error_bad_value);
2083 goto error_return;
2084 }
2085 }
2086
2087 if (old_tls_type != tls_type)
2088 {
2089 if (eh != NULL)
2090 eh->tls_type = tls_type;
2091 else
2092 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2093 }
2094 }
2095 /* Fall through */
2096
2097 case R_X86_64_GOTOFF64:
2098 case R_X86_64_GOTPC32:
2099 case R_X86_64_GOTPC64:
2100 create_got:
2101 if (eh != NULL)
2102 eh->zero_undefweak &= 0x2;
2103 break;
2104
2105 case R_X86_64_PLT32:
2106 case R_X86_64_PLT32_BND:
2107 /* This symbol requires a procedure linkage table entry. We
2108 actually build the entry in adjust_dynamic_symbol,
2109 because this might be a case of linking PIC code which is
2110 never referenced by a dynamic object, in which case we
2111 don't need to generate a procedure linkage table entry
2112 after all. */
2113
2114 /* If this is a local symbol, we resolve it directly without
2115 creating a procedure linkage table entry. */
2116 if (h == NULL)
2117 continue;
2118
2119 eh->zero_undefweak &= 0x2;
2120 h->needs_plt = 1;
2121 h->plt.refcount = 1;
2122 break;
2123
2124 case R_X86_64_PLTOFF64:
2125 /* This tries to form the 'address' of a function relative
2126 to GOT. For global symbols we need a PLT entry. */
2127 if (h != NULL)
2128 {
2129 h->needs_plt = 1;
2130 h->plt.refcount = 1;
2131 }
2132 goto create_got;
2133
2134 case R_X86_64_SIZE32:
2135 case R_X86_64_SIZE64:
2136 size_reloc = TRUE;
2137 goto do_size;
2138
2139 case R_X86_64_PC8:
2140 case R_X86_64_PC16:
2141 case R_X86_64_PC32:
2142 case R_X86_64_PC32_BND:
2143 do_check_pic = TRUE;
2144 goto check_pic;
2145
2146 case R_X86_64_32:
2147 if (!ABI_64_P (abfd))
2148 goto pointer;
2149 /* Fall through. */
2150 case R_X86_64_8:
2151 case R_X86_64_16:
2152 case R_X86_64_32S:
2153 /* Check relocation overflow as these relocs may lead to
2154 run-time relocation overflow. Don't error out for
2155 sections we don't care about, such as debug sections or
2156 when relocation overflow check is disabled. */
2157 if (!htab->params->no_reloc_overflow_check
2158 && !converted_reloc
2159 && (bfd_link_pic (info)
2160 || (bfd_link_executable (info)
2161 && h != NULL
2162 && !h->def_regular
2163 && h->def_dynamic
2164 && (sec->flags & SEC_READONLY) == 0)))
2165 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2166 &x86_64_elf_howto_table[r_type]);
2167 /* Fall through. */
2168
2169 case R_X86_64_PC64:
2170 case R_X86_64_64:
2171 pointer:
2172 do_check_pic = FALSE;
2173 check_pic:
2174 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2175 eh->zero_undefweak |= 0x2;
2176	  /* We are called after all symbols have been resolved.  Only
2177	     relocations against STT_GNU_IFUNC symbols must go through
2178	     the PLT.  */
2179 if (h != NULL
2180 && (bfd_link_executable (info)
2181 || h->type == STT_GNU_IFUNC))
2182 {
2183 bfd_boolean func_pointer_ref = FALSE;
2184
2185 if (r_type == R_X86_64_PC32)
2186 {
2187		  /* Since something like ".long foo - ." may be used
2188		     as a pointer, make sure that the PLT is used if foo
2189		     is a function defined in a shared library.  */
2190 if ((sec->flags & SEC_CODE) == 0)
2191 {
2192 h->pointer_equality_needed = 1;
2193 if (bfd_link_pie (info)
2194 && h->type == STT_FUNC
2195 && !h->def_regular
2196 && h->def_dynamic)
2197 {
2198 h->needs_plt = 1;
2199 h->plt.refcount = 1;
2200 }
2201 }
2202 }
2203 else if (r_type != R_X86_64_PC32_BND
2204 && r_type != R_X86_64_PC64)
2205 {
2206 h->pointer_equality_needed = 1;
2207 /* At run-time, R_X86_64_64 can be resolved for both
2208 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2209 can only be resolved for x32. */
2210 if ((sec->flags & SEC_READONLY) == 0
2211 && (r_type == R_X86_64_64
2212 || (!ABI_64_P (abfd)
2213 && (r_type == R_X86_64_32
2214 || r_type == R_X86_64_32S))))
2215 func_pointer_ref = TRUE;
2216 }
2217
2218 if (!func_pointer_ref)
2219 {
2220 /* If this reloc is in a read-only section, we might
2221 need a copy reloc. We can't check reliably at this
2222 stage whether the section is read-only, as input
2223 sections have not yet been mapped to output sections.
2224 Tentatively set the flag for now, and correct in
2225 adjust_dynamic_symbol. */
2226 h->non_got_ref = 1;
2227
2228 /* We may need a .plt entry if the symbol is a function
2229 defined in a shared lib or is a function referenced
2230 from the code or read-only section. */
2231 if (!h->def_regular
2232 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2233 h->plt.refcount = 1;
2234 }
2235 }
2236
2237 if (do_check_pic)
2238 {
2239	      /* Don't complain about -fPIC if the symbol is undefined
2240		 when building an executable, unless it is an unresolved
2241		 weak symbol, references a dynamic definition in a PIE, or
2242		 -z nocopyreloc is used.  */
2243 bfd_boolean no_copyreloc_p
2244 = (info->nocopyreloc
2245 || (h != NULL
2246 && !h->root.linker_def
2247 && !h->root.ldscript_def
2248 && eh->def_protected
2249 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
2250 if ((sec->flags & SEC_ALLOC) != 0
2251 && (sec->flags & SEC_READONLY) != 0
2252 && h != NULL
2253 && ((bfd_link_executable (info)
2254 && ((h->root.type == bfd_link_hash_undefweak
2255 && (eh == NULL
2256 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2257 eh)))
2258 || (bfd_link_pie (info)
2259 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2260 && h->def_dynamic)
2261 || (no_copyreloc_p
2262 && h->def_dynamic
2263 && !(h->root.u.def.section->flags & SEC_CODE))))
2264 || bfd_link_dll (info)))
2265 {
2266 bfd_boolean fail = FALSE;
2267 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
2268 {
2269 /* Symbol is referenced locally. Make sure it is
2270 defined locally. */
2271 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
2272 }
2273 else if (bfd_link_pie (info))
2274 {
2275 /* We can only use PC-relative relocations in PIE
2276 from non-code sections. */
2277 if (h->type == STT_FUNC
2278 && (sec->flags & SEC_CODE) != 0)
2279 fail = TRUE;
2280 }
2281 else if (no_copyreloc_p || bfd_link_dll (info))
2282 {
2283 /* Symbol doesn't need copy reloc and isn't
2284 referenced locally. Don't allow PC-relative
2285 relocations against default and protected
2286 symbols since address of protected function
2287 and location of protected data may not be in
2288 the shared object. */
2289 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2290 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
2291 }
2292
2293 if (fail)
2294 return elf_x86_64_need_pic (info, abfd, sec, h,
2295 symtab_hdr, isym,
2296 &x86_64_elf_howto_table[r_type]);
2297 }
2298 }
2299
2300 size_reloc = FALSE;
2301 do_size:
2302 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2303 htab->pointer_r_type))
2304 {
2305 struct elf_dyn_relocs *p;
2306 struct elf_dyn_relocs **head;
2307
2308 /* We must copy these reloc types into the output file.
2309 Create a reloc section in dynobj and make room for
2310 this reloc. */
2311 if (sreloc == NULL)
2312 {
2313 sreloc = _bfd_elf_make_dynamic_reloc_section
2314 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2315 abfd, /*rela?*/ TRUE);
2316
2317 if (sreloc == NULL)
2318 goto error_return;
2319 }
2320
2321 /* If this is a global symbol, we count the number of
2322 relocations we need for this symbol. */
2323 if (h != NULL)
2324 head = &eh->dyn_relocs;
2325 else
2326 {
2327 /* Track dynamic relocs needed for local syms too.
2328 We really need local syms available to do this
2329 easily. Oh well. */
2330 asection *s;
2331 void **vpp;
2332
2333 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2334 abfd, r_symndx);
2335 if (isym == NULL)
2336 goto error_return;
2337
2338 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2339 if (s == NULL)
2340 s = sec;
2341
2342 /* Beware of type punned pointers vs strict aliasing
2343 rules. */
2344 vpp = &(elf_section_data (s)->local_dynrel);
2345 head = (struct elf_dyn_relocs **)vpp;
2346 }
2347
2348 p = *head;
2349 if (p == NULL || p->sec != sec)
2350 {
2351 bfd_size_type amt = sizeof *p;
2352
2353 p = ((struct elf_dyn_relocs *)
2354 bfd_alloc (htab->elf.dynobj, amt));
2355 if (p == NULL)
2356 goto error_return;
2357 p->next = *head;
2358 *head = p;
2359 p->sec = sec;
2360 p->count = 0;
2361 p->pc_count = 0;
2362 }
2363
2364 p->count += 1;
2365 /* Count size relocation as PC-relative relocation. */
2366 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2367 p->pc_count += 1;
2368 }
2369 break;
2370
2371 /* This relocation describes the C++ object vtable hierarchy.
2372 Reconstruct it for later use during GC. */
2373 case R_X86_64_GNU_VTINHERIT:
2374 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2375 goto error_return;
2376 break;
2377
2378 /* This relocation describes which C++ vtable entries are actually
2379 used. Record for later use during GC. */
2380 case R_X86_64_GNU_VTENTRY:
2381 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2382 goto error_return;
2383 break;
2384
2385 default:
2386 break;
2387 }
2388 }
2389
2390 if (elf_section_data (sec)->this_hdr.contents != contents)
2391 {
2392 if (!converted && !info->keep_memory)
2393 free (contents);
2394 else
2395 {
2396 /* Cache the section contents for elf_link_input_bfd if any
2397 load is converted or --no-keep-memory isn't used. */
2398 elf_section_data (sec)->this_hdr.contents = contents;
2399 }
2400 }
2401
2402 /* Cache relocations if any load is converted. */
2403 if (elf_section_data (sec)->relocs != relocs && converted)
2404 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2405
2406 return TRUE;
2407
2408 error_return:
2409 if (elf_section_data (sec)->this_hdr.contents != contents)
2410 free (contents);
2411 sec->check_relocs_failed = 1;
2412 return FALSE;
2413 }
2414
2415 /* Return the relocation value for @tpoff relocation
2416 if STT_TLS virtual address is ADDRESS. */
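/* NB: on x86-64 the static TLS block lives just below the thread
   pointer, so @tpoff values are negative; subtracting the aligned
   static TLS size and the TLS section's vma from ADDRESS yields that
   negative offset.  */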
2417
2418 static bfd_vma
2419 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2420 {
2421 struct elf_link_hash_table *htab = elf_hash_table (info);
2422 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2423 bfd_vma static_tls_size;
2424
2425   /* If tls_sec is NULL, we should have signalled an error already.  */
2426 if (htab->tls_sec == NULL)
2427 return 0;
2428
2429 /* Consider special static TLS alignment requirements. */
2430 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2431 return address - static_tls_size - htab->tls_sec->vma;
2432 }
2433
2434 /* Relocate an x86_64 ELF section. */
2435
2436 static bfd_boolean
2437 elf_x86_64_relocate_section (bfd *output_bfd,
2438 struct bfd_link_info *info,
2439 bfd *input_bfd,
2440 asection *input_section,
2441 bfd_byte *contents,
2442 Elf_Internal_Rela *relocs,
2443 Elf_Internal_Sym *local_syms,
2444 asection **local_sections)
2445 {
2446 struct elf_x86_link_hash_table *htab;
2447 Elf_Internal_Shdr *symtab_hdr;
2448 struct elf_link_hash_entry **sym_hashes;
2449 bfd_vma *local_got_offsets;
2450 bfd_vma *local_tlsdesc_gotents;
2451 Elf_Internal_Rela *rel;
2452 Elf_Internal_Rela *wrel;
2453 Elf_Internal_Rela *relend;
2454 unsigned int plt_entry_size;
2455
2456 /* Skip if check_relocs failed. */
2457 if (input_section->check_relocs_failed)
2458 return FALSE;
2459
2460 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2461 if (htab == NULL)
2462 return FALSE;
2463
2464 if (!is_x86_elf (input_bfd, htab))
2465 {
2466 bfd_set_error (bfd_error_wrong_format);
2467 return FALSE;
2468 }
2469
2470 plt_entry_size = htab->plt.plt_entry_size;
2471 symtab_hdr = &elf_symtab_hdr (input_bfd);
2472 sym_hashes = elf_sym_hashes (input_bfd);
2473 local_got_offsets = elf_local_got_offsets (input_bfd);
2474 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2475
2476 _bfd_x86_elf_set_tls_module_base (info);
2477
2478 rel = wrel = relocs;
2479 relend = relocs + input_section->reloc_count;
2480 for (; rel < relend; wrel++, rel++)
2481 {
2482 unsigned int r_type, r_type_tls;
2483 reloc_howto_type *howto;
2484 unsigned long r_symndx;
2485 struct elf_link_hash_entry *h;
2486 struct elf_x86_link_hash_entry *eh;
2487 Elf_Internal_Sym *sym;
2488 asection *sec;
2489 bfd_vma off, offplt, plt_offset;
2490 bfd_vma relocation;
2491 bfd_boolean unresolved_reloc;
2492 bfd_reloc_status_type r;
2493 int tls_type;
2494 asection *base_got, *resolved_plt;
2495 bfd_vma st_size;
2496 bfd_boolean resolved_to_zero;
2497 bfd_boolean relative_reloc;
2498 bfd_boolean converted_reloc;
2499 bfd_boolean need_copy_reloc_in_pie;
2500
2501 r_type = ELF32_R_TYPE (rel->r_info);
2502 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2503 || r_type == (int) R_X86_64_GNU_VTENTRY)
2504 {
2505 if (wrel != rel)
2506 *wrel = *rel;
2507 continue;
2508 }
2509
2510 r_symndx = htab->r_sym (rel->r_info);
2511 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2512 if (converted_reloc)
2513 {
2514 r_type &= ~R_X86_64_converted_reloc_bit;
2515 rel->r_info = htab->r_info (r_symndx, r_type);
2516 }
2517
2518 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2519 if (howto == NULL)
2520 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2521
2522 h = NULL;
2523 sym = NULL;
2524 sec = NULL;
2525 unresolved_reloc = FALSE;
2526 if (r_symndx < symtab_hdr->sh_info)
2527 {
2528 sym = local_syms + r_symndx;
2529 sec = local_sections[r_symndx];
2530
2531 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2532 &sec, rel);
2533 st_size = sym->st_size;
2534
2535 /* Relocate against local STT_GNU_IFUNC symbol. */
2536 if (!bfd_link_relocatable (info)
2537 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2538 {
2539 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2540 rel, FALSE);
2541 if (h == NULL)
2542 abort ();
2543
2544 /* Set STT_GNU_IFUNC symbol value. */
2545 h->root.u.def.value = sym->st_value;
2546 h->root.u.def.section = sec;
2547 }
2548 }
2549 else
2550 {
2551 bfd_boolean warned ATTRIBUTE_UNUSED;
2552 bfd_boolean ignored ATTRIBUTE_UNUSED;
2553
2554 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2555 r_symndx, symtab_hdr, sym_hashes,
2556 h, sec, relocation,
2557 unresolved_reloc, warned, ignored);
2558 st_size = h->size;
2559 }
2560
2561 if (sec != NULL && discarded_section (sec))
2562 {
2563 _bfd_clear_contents (howto, input_bfd, input_section,
2564 contents, rel->r_offset);
2565 wrel->r_offset = rel->r_offset;
2566 wrel->r_info = 0;
2567 wrel->r_addend = 0;
2568
2569	  /* For ld -r, remove relocations in debug sections against
2570	     sections defined in discarded sections.  Not done for .eh_frame
2571	     because the editing code expects them to be present.  */
2572 if (bfd_link_relocatable (info)
2573 && (input_section->flags & SEC_DEBUGGING))
2574 wrel--;
2575
2576 continue;
2577 }
2578
2579 if (bfd_link_relocatable (info))
2580 {
2581 if (wrel != rel)
2582 *wrel = *rel;
2583 continue;
2584 }
2585
2586 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2587 {
2588 if (r_type == R_X86_64_64)
2589 {
2590 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2591 zero-extend it to 64bit if addend is zero. */
2592 r_type = R_X86_64_32;
2593 memset (contents + rel->r_offset + 4, 0, 4);
2594 }
2595 else if (r_type == R_X86_64_SIZE64)
2596 {
2597 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2598 zero-extend it to 64bit if addend is zero. */
2599 r_type = R_X86_64_SIZE32;
2600 memset (contents + rel->r_offset + 4, 0, 4);
2601 }
2602 }
2603
2604 eh = (struct elf_x86_link_hash_entry *) h;
2605
2606       /* Since an STT_GNU_IFUNC symbol must go through the PLT, we
2607	 handle it here if it is defined in a non-shared object.  */
2608 if (h != NULL
2609 && h->type == STT_GNU_IFUNC
2610 && h->def_regular)
2611 {
2612 bfd_vma plt_index;
2613 const char *name;
2614
2615 if ((input_section->flags & SEC_ALLOC) == 0)
2616 {
2617 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2618 STT_GNU_IFUNC symbol as STT_FUNC. */
2619 if (elf_section_type (input_section) == SHT_NOTE)
2620 goto skip_ifunc;
2621 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2622 sections because such sections are not SEC_ALLOC and
2623 thus ld.so will not process them. */
2624 if ((input_section->flags & SEC_DEBUGGING) != 0)
2625 continue;
2626 abort ();
2627 }
2628
2629 switch (r_type)
2630 {
2631 default:
2632 break;
2633
2634 case R_X86_64_GOTPCREL:
2635 case R_X86_64_GOTPCRELX:
2636 case R_X86_64_REX_GOTPCRELX:
2637 case R_X86_64_GOTPCREL64:
2638 base_got = htab->elf.sgot;
2639 off = h->got.offset;
2640
2641 if (base_got == NULL)
2642 abort ();
2643
2644 if (off == (bfd_vma) -1)
2645 {
2646 /* We can't use h->got.offset here to save state, or
2647 even just remember the offset, as finish_dynamic_symbol
2648 would use that as offset into .got. */
2649
2650 if (h->plt.offset == (bfd_vma) -1)
2651 abort ();
2652
2653 if (htab->elf.splt != NULL)
2654 {
2655 plt_index = (h->plt.offset / plt_entry_size
2656 - htab->plt.has_plt0);
2657 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2658 base_got = htab->elf.sgotplt;
2659 }
2660 else
2661 {
2662 plt_index = h->plt.offset / plt_entry_size;
2663 off = plt_index * GOT_ENTRY_SIZE;
2664 base_got = htab->elf.igotplt;
2665 }
2666
2667 if (h->dynindx == -1
2668 || h->forced_local
2669 || info->symbolic)
2670 {
2671		  /* This references the local definition.  We must
2672 initialize this entry in the global offset table.
2673 Since the offset must always be a multiple of 8,
2674 we use the least significant bit to record
2675 whether we have initialized it already.
2676
2677 When doing a dynamic link, we create a .rela.got
2678 relocation entry to initialize the value. This
2679 is done in the finish_dynamic_symbol routine. */
2680 if ((off & 1) != 0)
2681 off &= ~1;
2682 else
2683 {
2684 bfd_put_64 (output_bfd, relocation,
2685 base_got->contents + off);
2686 /* Note that this is harmless for the GOTPLT64
2687 case, as -1 | 1 still is -1. */
2688 h->got.offset |= 1;
2689 }
2690 }
2691 }
2692
2693 relocation = (base_got->output_section->vma
2694 + base_got->output_offset + off);
2695
2696 goto do_relocation;
2697 }
2698
2699 if (h->plt.offset == (bfd_vma) -1)
2700 {
2701 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2702 if (r_type == htab->pointer_r_type
2703 && (input_section->flags & SEC_CODE) == 0)
2704 goto do_ifunc_pointer;
2705 goto bad_ifunc_reloc;
2706 }
2707
2708 /* STT_GNU_IFUNC symbol must go through PLT. */
2709 if (htab->elf.splt != NULL)
2710 {
2711 if (htab->plt_second != NULL)
2712 {
2713 resolved_plt = htab->plt_second;
2714 plt_offset = eh->plt_second.offset;
2715 }
2716 else
2717 {
2718 resolved_plt = htab->elf.splt;
2719 plt_offset = h->plt.offset;
2720 }
2721 }
2722 else
2723 {
2724 resolved_plt = htab->elf.iplt;
2725 plt_offset = h->plt.offset;
2726 }
2727
2728 relocation = (resolved_plt->output_section->vma
2729 + resolved_plt->output_offset + plt_offset);
2730
2731 switch (r_type)
2732 {
2733 default:
2734 bad_ifunc_reloc:
2735 if (h->root.root.string)
2736 name = h->root.root.string;
2737 else
2738 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2739 NULL);
2740 _bfd_error_handler
2741 /* xgettext:c-format */
2742 (_("%pB: relocation %s against STT_GNU_IFUNC "
2743 "symbol `%s' isn't supported"), input_bfd,
2744 howto->name, name);
2745 bfd_set_error (bfd_error_bad_value);
2746 return FALSE;
2747
2748 case R_X86_64_32S:
2749 if (bfd_link_pic (info))
2750 abort ();
2751 goto do_relocation;
2752
2753 case R_X86_64_32:
2754 if (ABI_64_P (output_bfd))
2755 goto do_relocation;
2756 /* FALLTHROUGH */
2757 case R_X86_64_64:
2758 do_ifunc_pointer:
2759 if (rel->r_addend != 0)
2760 {
2761 if (h->root.root.string)
2762 name = h->root.root.string;
2763 else
2764 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2765 sym, NULL);
2766 _bfd_error_handler
2767 /* xgettext:c-format */
2768 (_("%pB: relocation %s against STT_GNU_IFUNC "
2769 "symbol `%s' has non-zero addend: %" PRId64),
2770 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2771 bfd_set_error (bfd_error_bad_value);
2772 return FALSE;
2773 }
2774
2775	      /* Generate a dynamic relocation only when there is a
2776 non-GOT reference in a shared object or there is no
2777 PLT. */
2778 if ((bfd_link_pic (info) && h->non_got_ref)
2779 || h->plt.offset == (bfd_vma) -1)
2780 {
2781 Elf_Internal_Rela outrel;
2782 asection *sreloc;
2783
2784 /* Need a dynamic relocation to get the real function
2785 address. */
2786 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2787 info,
2788 input_section,
2789 rel->r_offset);
2790 if (outrel.r_offset == (bfd_vma) -1
2791 || outrel.r_offset == (bfd_vma) -2)
2792 abort ();
2793
2794 outrel.r_offset += (input_section->output_section->vma
2795 + input_section->output_offset);
2796
2797 if (POINTER_LOCAL_IFUNC_P (info, h))
2798 {
2799 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2800 h->root.root.string,
2801 h->root.u.def.section->owner);
2802
2803 /* This symbol is resolved locally. */
2804 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2805 outrel.r_addend = (h->root.u.def.value
2806 + h->root.u.def.section->output_section->vma
2807 + h->root.u.def.section->output_offset);
2808 }
2809 else
2810 {
2811 outrel.r_info = htab->r_info (h->dynindx, r_type);
2812 outrel.r_addend = 0;
2813 }
2814
2815 /* Dynamic relocations are stored in
2816 1. .rela.ifunc section in PIC object.
2817 2. .rela.got section in dynamic executable.
2818 3. .rela.iplt section in static executable. */
2819 if (bfd_link_pic (info))
2820 sreloc = htab->elf.irelifunc;
2821 else if (htab->elf.splt != NULL)
2822 sreloc = htab->elf.srelgot;
2823 else
2824 sreloc = htab->elf.irelplt;
2825 elf_append_rela (output_bfd, sreloc, &outrel);
2826
2827		  /* If this reloc is against an external symbol, we
2828		     do not want to fiddle with the addend.  Otherwise,
2829		     we need to include the symbol value so that it
2830		     becomes an addend for the dynamic reloc.  For an
2831		     internal symbol, the addend has already been updated.  */
2832 continue;
2833 }
2834 /* FALLTHROUGH */
2835 case R_X86_64_PC32:
2836 case R_X86_64_PC32_BND:
2837 case R_X86_64_PC64:
2838 case R_X86_64_PLT32:
2839 case R_X86_64_PLT32_BND:
2840 goto do_relocation;
2841 }
2842 }
2843
2844 skip_ifunc:
2845 resolved_to_zero = (eh != NULL
2846 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2847
2848 /* When generating a shared object, the relocations handled here are
2849 copied into the output file to be resolved at run time. */
2850 switch (r_type)
2851 {
2852 case R_X86_64_GOT32:
2853 case R_X86_64_GOT64:
2854 /* Relocation is to the entry for this symbol in the global
2855 offset table. */
2856 case R_X86_64_GOTPCREL:
2857 case R_X86_64_GOTPCRELX:
2858 case R_X86_64_REX_GOTPCRELX:
2859 case R_X86_64_GOTPCREL64:
2860 /* Use global offset table entry as symbol value. */
2861 case R_X86_64_GOTPLT64:
2862 /* This is obsolete and treated the same as GOT64. */
2863 base_got = htab->elf.sgot;
2864
2865 if (htab->elf.sgot == NULL)
2866 abort ();
2867
2868 relative_reloc = FALSE;
2869 if (h != NULL)
2870 {
2871 off = h->got.offset;
2872 if (h->needs_plt
2873 && h->plt.offset != (bfd_vma)-1
2874 && off == (bfd_vma)-1)
2875 {
2876 /* We can't use h->got.offset here to save
2877 state, or even just remember the offset, as
2878 finish_dynamic_symbol would use that as offset into
2879 .got. */
2880 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2881 - htab->plt.has_plt0);
2882 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2883 base_got = htab->elf.sgotplt;
2884 }
2885
2886 if (RESOLVED_LOCALLY_P (info, h, htab))
2887 {
2888 /* We must initialize this entry in the global offset
2889 table. Since the offset must always be a multiple
2890 of 8, we use the least significant bit to record
2891 whether we have initialized it already.
2892
2893 When doing a dynamic link, we create a .rela.got
2894 relocation entry to initialize the value. This is
2895 done in the finish_dynamic_symbol routine. */
2896 if ((off & 1) != 0)
2897 off &= ~1;
2898 else
2899 {
2900 bfd_put_64 (output_bfd, relocation,
2901 base_got->contents + off);
2902 /* Note that this is harmless for the GOTPLT64 case,
2903 as -1 | 1 still is -1. */
2904 h->got.offset |= 1;
2905
2906 if (GENERATE_RELATIVE_RELOC_P (info, h))
2907 {
2908 /* If this symbol isn't dynamic in PIC,
2909 generate R_X86_64_RELATIVE here. */
2910 eh->no_finish_dynamic_symbol = 1;
2911 relative_reloc = TRUE;
2912 }
2913 }
2914 }
2915 else
2916 unresolved_reloc = FALSE;
2917 }
2918 else
2919 {
2920 if (local_got_offsets == NULL)
2921 abort ();
2922
2923 off = local_got_offsets[r_symndx];
2924
2925 /* The offset must always be a multiple of 8. We use
2926 the least significant bit to record whether we have
2927 already generated the necessary reloc. */
2928 if ((off & 1) != 0)
2929 off &= ~1;
2930 else
2931 {
2932 bfd_put_64 (output_bfd, relocation,
2933 base_got->contents + off);
2934 local_got_offsets[r_symndx] |= 1;
2935
2936 if (bfd_link_pic (info))
2937 relative_reloc = TRUE;
2938 }
2939 }
2940
2941 if (relative_reloc)
2942 {
2943 asection *s;
2944 Elf_Internal_Rela outrel;
2945
2946 /* We need to generate a R_X86_64_RELATIVE reloc
2947 for the dynamic linker. */
2948 s = htab->elf.srelgot;
2949 if (s == NULL)
2950 abort ();
2951
2952 outrel.r_offset = (base_got->output_section->vma
2953 + base_got->output_offset
2954 + off);
2955 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2956 outrel.r_addend = relocation;
2957 elf_append_rela (output_bfd, s, &outrel);
2958 }
2959
2960 if (off >= (bfd_vma) -2)
2961 abort ();
2962
2963 relocation = base_got->output_section->vma
2964 + base_got->output_offset + off;
2965 if (r_type != R_X86_64_GOTPCREL
2966 && r_type != R_X86_64_GOTPCRELX
2967 && r_type != R_X86_64_REX_GOTPCRELX
2968 && r_type != R_X86_64_GOTPCREL64)
2969 relocation -= htab->elf.sgotplt->output_section->vma
2970 - htab->elf.sgotplt->output_offset;
2971
2972 break;
2973
2974 case R_X86_64_GOTOFF64:
2975 /* Relocation is relative to the start of the global offset
2976 table. */
2977
2978	  /* Check to make sure it isn't a protected function or data
2979	     symbol for a shared library, since it may not be local when
2980	     used as a function address or with a copy relocation.  We also
2981	     need to make sure that the symbol is referenced locally.  */
2982 if (bfd_link_pic (info) && h)
2983 {
2984 if (!h->def_regular)
2985 {
2986 const char *v;
2987
2988 switch (ELF_ST_VISIBILITY (h->other))
2989 {
2990 case STV_HIDDEN:
2991 v = _("hidden symbol");
2992 break;
2993 case STV_INTERNAL:
2994 v = _("internal symbol");
2995 break;
2996 case STV_PROTECTED:
2997 v = _("protected symbol");
2998 break;
2999 default:
3000 v = _("symbol");
3001 break;
3002 }
3003
3004 _bfd_error_handler
3005 /* xgettext:c-format */
3006 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3007 " `%s' can not be used when making a shared object"),
3008 input_bfd, v, h->root.root.string);
3009 bfd_set_error (bfd_error_bad_value);
3010 return FALSE;
3011 }
3012 else if (!bfd_link_executable (info)
3013 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3014 && (h->type == STT_FUNC
3015 || h->type == STT_OBJECT)
3016 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3017 {
3018 _bfd_error_handler
3019 /* xgettext:c-format */
3020 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3021 " `%s' can not be used when making a shared object"),
3022 input_bfd,
3023 h->type == STT_FUNC ? "function" : "data",
3024 h->root.root.string);
3025 bfd_set_error (bfd_error_bad_value);
3026 return FALSE;
3027 }
3028 }
3029
3030 /* Note that sgot is not involved in this
3031 calculation. We always want the start of .got.plt. If we
3032 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3033 permitted by the ABI, we might have to change this
3034 calculation. */
3035 relocation -= htab->elf.sgotplt->output_section->vma
3036 + htab->elf.sgotplt->output_offset;
3037 break;
3038
3039 case R_X86_64_GOTPC32:
3040 case R_X86_64_GOTPC64:
3041 /* Use global offset table as symbol value. */
3042 relocation = htab->elf.sgotplt->output_section->vma
3043 + htab->elf.sgotplt->output_offset;
3044 unresolved_reloc = FALSE;
3045 break;
3046
3047 case R_X86_64_PLTOFF64:
3048 /* Relocation is PLT entry relative to GOT. For local
3049 symbols it's the symbol itself relative to GOT. */
3050 if (h != NULL
3051 /* See PLT32 handling. */
3052 && (h->plt.offset != (bfd_vma) -1
3053 || eh->plt_got.offset != (bfd_vma) -1)
3054 && htab->elf.splt != NULL)
3055 {
3056 if (eh->plt_got.offset != (bfd_vma) -1)
3057 {
3058 /* Use the GOT PLT. */
3059 resolved_plt = htab->plt_got;
3060 plt_offset = eh->plt_got.offset;
3061 }
3062 else if (htab->plt_second != NULL)
3063 {
3064 resolved_plt = htab->plt_second;
3065 plt_offset = eh->plt_second.offset;
3066 }
3067 else
3068 {
3069 resolved_plt = htab->elf.splt;
3070 plt_offset = h->plt.offset;
3071 }
3072
3073 relocation = (resolved_plt->output_section->vma
3074 + resolved_plt->output_offset
3075 + plt_offset);
3076 unresolved_reloc = FALSE;
3077 }
3078
3079 relocation -= htab->elf.sgotplt->output_section->vma
3080 + htab->elf.sgotplt->output_offset;
3081 break;
3082
3083 case R_X86_64_PLT32:
3084 case R_X86_64_PLT32_BND:
3085 /* Relocation is to the entry for this symbol in the
3086 procedure linkage table. */
3087
3088 /* Resolve a PLT32 reloc against a local symbol directly,
3089 without using the procedure linkage table. */
3090 if (h == NULL)
3091 break;
3092
3093 if ((h->plt.offset == (bfd_vma) -1
3094 && eh->plt_got.offset == (bfd_vma) -1)
3095 || htab->elf.splt == NULL)
3096 {
3097 /* We didn't make a PLT entry for this symbol. This
3098 happens when statically linking PIC code, or when
3099 using -Bsymbolic. */
3100 break;
3101 }
3102
3103 use_plt:
3104 if (h->plt.offset != (bfd_vma) -1)
3105 {
3106 if (htab->plt_second != NULL)
3107 {
3108 resolved_plt = htab->plt_second;
3109 plt_offset = eh->plt_second.offset;
3110 }
3111 else
3112 {
3113 resolved_plt = htab->elf.splt;
3114 plt_offset = h->plt.offset;
3115 }
3116 }
3117 else
3118 {
3119 /* Use the GOT PLT. */
3120 resolved_plt = htab->plt_got;
3121 plt_offset = eh->plt_got.offset;
3122 }
3123
3124 relocation = (resolved_plt->output_section->vma
3125 + resolved_plt->output_offset
3126 + plt_offset);
3127 unresolved_reloc = FALSE;
3128 break;
3129
3130 case R_X86_64_SIZE32:
3131 case R_X86_64_SIZE64:
3132 /* Set to symbol size. */
3133 relocation = st_size;
3134 goto direct;
3135
3136 case R_X86_64_PC8:
3137 case R_X86_64_PC16:
3138 case R_X86_64_PC32:
3139 case R_X86_64_PC32_BND:
3140	      /* Since x86-64 has a PC-relative PLT, we can use the PLT
3141	 in a PIE as the function address.  */
3142 if (h != NULL
3143 && (input_section->flags & SEC_CODE) == 0
3144 && bfd_link_pie (info)
3145 && h->type == STT_FUNC
3146 && !h->def_regular
3147 && h->def_dynamic)
3148 goto use_plt;
3149 /* Fall through. */
3150
3151 case R_X86_64_8:
3152 case R_X86_64_16:
3153 case R_X86_64_32:
3154 case R_X86_64_PC64:
3155 case R_X86_64_64:
3156	      /* FIXME: The ABI says the linker should make sure the value is
3157		 the same when it's zero-extended to 64 bits.  */
3158
3159 direct:
3160 if ((input_section->flags & SEC_ALLOC) == 0)
3161 break;
3162
3163 need_copy_reloc_in_pie = (bfd_link_pie (info)
3164 && h != NULL
3165 && (h->needs_copy
3166 || eh->needs_copy
3167 || (h->root.type
3168 == bfd_link_hash_undefined))
3169 && (X86_PCREL_TYPE_P (r_type)
3170 || X86_SIZE_TYPE_P (r_type)));
3171
3172 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3173 need_copy_reloc_in_pie,
3174 resolved_to_zero, FALSE))
3175 {
3176 Elf_Internal_Rela outrel;
3177 bfd_boolean skip, relocate;
3178 asection *sreloc;
3179
3180 /* When generating a shared object, these relocations
3181 are copied into the output file to be resolved at run
3182 time. */
3183 skip = FALSE;
3184 relocate = FALSE;
3185
3186 outrel.r_offset =
3187 _bfd_elf_section_offset (output_bfd, info, input_section,
3188 rel->r_offset);
3189 if (outrel.r_offset == (bfd_vma) -1)
3190 skip = TRUE;
3191 else if (outrel.r_offset == (bfd_vma) -2)
3192 skip = TRUE, relocate = TRUE;
3193
3194 outrel.r_offset += (input_section->output_section->vma
3195 + input_section->output_offset);
3196
3197 if (skip)
3198 memset (&outrel, 0, sizeof outrel);
3199
3200 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3201 {
3202 outrel.r_info = htab->r_info (h->dynindx, r_type);
3203 outrel.r_addend = rel->r_addend;
3204 }
3205 else
3206 {
3207 /* This symbol is local, or marked to become local.
3208 When relocation overflow check is disabled, we
3209 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3210 if (r_type == htab->pointer_r_type
3211 || (r_type == R_X86_64_32
3212 && htab->params->no_reloc_overflow_check))
3213 {
3214 relocate = TRUE;
3215 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3216 outrel.r_addend = relocation + rel->r_addend;
3217 }
3218 else if (r_type == R_X86_64_64
3219 && !ABI_64_P (output_bfd))
3220 {
3221 relocate = TRUE;
3222 outrel.r_info = htab->r_info (0,
3223 R_X86_64_RELATIVE64);
3224 outrel.r_addend = relocation + rel->r_addend;
3225 /* Check addend overflow. */
3226 if ((outrel.r_addend & 0x80000000)
3227 != (rel->r_addend & 0x80000000))
3228 {
3229 const char *name;
3230 int addend = rel->r_addend;
3231 if (h && h->root.root.string)
3232 name = h->root.root.string;
3233 else
3234 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3235 sym, NULL);
3236 _bfd_error_handler
3237 /* xgettext:c-format */
3238 (_("%pB: addend %s%#x in relocation %s against "
3239 "symbol `%s' at %#" PRIx64
3240 " in section `%pA' is out of range"),
3241 input_bfd, addend < 0 ? "-" : "", addend,
3242 howto->name, name, (uint64_t) rel->r_offset,
3243 input_section);
3244 bfd_set_error (bfd_error_bad_value);
3245 return FALSE;
3246 }
3247 }
3248 else
3249 {
3250 long sindx;
3251
3252 if (bfd_is_abs_section (sec))
3253 sindx = 0;
3254 else if (sec == NULL || sec->owner == NULL)
3255 {
3256 bfd_set_error (bfd_error_bad_value);
3257 return FALSE;
3258 }
3259 else
3260 {
3261 asection *osec;
3262
3263 /* We are turning this relocation into one
3264 against a section symbol. It would be
3265 proper to subtract the symbol's value,
3266 osec->vma, from the emitted reloc addend,
3267 but ld.so expects buggy relocs. */
3268 osec = sec->output_section;
3269 sindx = elf_section_data (osec)->dynindx;
3270 if (sindx == 0)
3271 {
3272 asection *oi = htab->elf.text_index_section;
3273 sindx = elf_section_data (oi)->dynindx;
3274 }
3275 BFD_ASSERT (sindx != 0);
3276 }
3277
3278 outrel.r_info = htab->r_info (sindx, r_type);
3279 outrel.r_addend = relocation + rel->r_addend;
3280 }
3281 }
3282
3283 sreloc = elf_section_data (input_section)->sreloc;
3284
3285 if (sreloc == NULL || sreloc->contents == NULL)
3286 {
3287 r = bfd_reloc_notsupported;
3288 goto check_relocation_error;
3289 }
3290
3291 elf_append_rela (output_bfd, sreloc, &outrel);
3292
3293 /* If this reloc is against an external symbol, we do
3294 not want to fiddle with the addend. Otherwise, we
3295 need to include the symbol value so that it becomes
3296 an addend for the dynamic reloc. */
3297 if (! relocate)
3298 continue;
3299 }
3300
3301 break;
3302
3303 case R_X86_64_TLSGD:
3304 case R_X86_64_GOTPC32_TLSDESC:
3305 case R_X86_64_TLSDESC_CALL:
3306 case R_X86_64_GOTTPOFF:
3307 tls_type = GOT_UNKNOWN;
3308 if (h == NULL && local_got_offsets)
3309 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3310 else if (h != NULL)
3311 tls_type = elf_x86_hash_entry (h)->tls_type;
3312
3313 r_type_tls = r_type;
3314 if (! elf_x86_64_tls_transition (info, input_bfd,
3315 input_section, contents,
3316 symtab_hdr, sym_hashes,
3317 &r_type_tls, tls_type, rel,
3318 relend, h, r_symndx, TRUE))
3319 return FALSE;
3320
3321 if (r_type_tls == R_X86_64_TPOFF32)
3322 {
3323 bfd_vma roff = rel->r_offset;
3324
3325 BFD_ASSERT (! unresolved_reloc);
3326
3327 if (r_type == R_X86_64_TLSGD)
3328 {
3329 /* GD->LE transition. For 64bit, change
3330 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3331 .word 0x6666; rex64; call __tls_get_addr@PLT
3332 or
3333 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3334 .byte 0x66; rex64
3335 call *__tls_get_addr@GOTPCREL(%rip)
3336 which may be converted to
3337 addr32 call __tls_get_addr
3338 into:
3339 movq %fs:0, %rax
3340 leaq foo@tpoff(%rax), %rax
3341 For 32bit, change
3342 leaq foo@tlsgd(%rip), %rdi
3343 .word 0x6666; rex64; call __tls_get_addr@PLT
3344 or
3345 leaq foo@tlsgd(%rip), %rdi
3346 .byte 0x66; rex64
3347 call *__tls_get_addr@GOTPCREL(%rip)
3348 which may be converted to
3349 addr32 call __tls_get_addr
3350 into:
3351 movl %fs:0, %eax
3352 leaq foo@tpoff(%rax), %rax
3353 For largepic, change:
3354 leaq foo@tlsgd(%rip), %rdi
3355 movabsq $__tls_get_addr@pltoff, %rax
3356 addq %r15, %rax
3357 call *%rax
3358 into:
3359 movq %fs:0, %rax
3360 leaq foo@tpoff(%rax), %rax
3361 nopw 0x0(%rax,%rax,1) */
3362 int largepic = 0;
3363 if (ABI_64_P (output_bfd))
3364 {
3365 if (contents[roff + 5] == 0xb8)
3366 {
3367 if (roff < 3
3368 || (roff - 3 + 22) > input_section->size)
3369 {
3370 corrupt_input:
3371 info->callbacks->einfo
3372 (_("%F%P: corrupt input: %pB\n"),
3373 input_bfd);
3374 return FALSE;
3375 }
3376 memcpy (contents + roff - 3,
3377 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3378 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3379 largepic = 1;
3380 }
3381 else
3382 {
3383 if (roff < 4
3384 || (roff - 4 + 16) > input_section->size)
3385 goto corrupt_input;
3386 memcpy (contents + roff - 4,
3387 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3388 16);
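			  /* NB: the 16 bytes above are "movq %fs:0, %rax"
			     (64 48 8b 04 25 00 00 00 00) followed by
			     "leaq 0x0(%rax), %rax" (48 8d 80 00 00 00 00);
			     foo@tpoff is patched into the lea displacement
			     below.  */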
3389 }
3390 }
3391 else
3392 {
3393 if (roff < 3
3394 || (roff - 3 + 15) > input_section->size)
3395 goto corrupt_input;
3396 memcpy (contents + roff - 3,
3397 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3398 15);
3399 }
3400 bfd_put_32 (output_bfd,
3401 elf_x86_64_tpoff (info, relocation),
3402 contents + roff + 8 + largepic);
3403 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3404 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3405 rel++;
3406 wrel++;
3407 continue;
3408 }
3409 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3410 {
3411 /* GDesc -> LE transition.
3412 It's originally something like:
3413 leaq x@tlsdesc(%rip), %rax
3414
3415 Change it to:
3416 movl $x@tpoff, %rax. */
3417
3418 unsigned int val, type;
3419
3420 if (roff < 3)
3421 goto corrupt_input;
3422 type = bfd_get_8 (input_bfd, contents + roff - 3);
3423 val = bfd_get_8 (input_bfd, contents + roff - 1);
3424 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3425 contents + roff - 3);
3426 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3427 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3428 contents + roff - 1);
3429 bfd_put_32 (output_bfd,
3430 elf_x86_64_tpoff (info, relocation),
3431 contents + roff);
3432 continue;
3433 }
3434 else if (r_type == R_X86_64_TLSDESC_CALL)
3435 {
3436 /* GDesc -> LE transition.
3437 It's originally:
3438 call *(%rax)
3439 Turn it into:
3440 xchg %ax,%ax. */
3441 bfd_put_8 (output_bfd, 0x66, contents + roff);
3442 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3443 continue;
3444 }
3445 else if (r_type == R_X86_64_GOTTPOFF)
3446 {
3447 /* IE->LE transition:
3448 For 64bit, originally it can be one of:
3449 movq foo@gottpoff(%rip), %reg
3450 addq foo@gottpoff(%rip), %reg
3451 We change it into:
3452 movq $foo, %reg
3453 leaq foo(%reg), %reg
3454 addq $foo, %reg.
3455 For 32bit, originally it can be one of:
3456 movq foo@gottpoff(%rip), %reg
3457 addl foo@gottpoff(%rip), %reg
3458 We change it into:
3459 movq $foo, %reg
3460 leal foo(%reg), %reg
3461 addl $foo, %reg. */
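		      /* NB: the instruction being converted is laid out as
			 [REX] [opcode] [ModRM] [disp32] with the relocation
			 on the disp32, so roff - 3, roff - 2 and roff - 1
			 address the (optional) REX prefix, the opcode and
			 the ModRM byte.  */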
3462
3463 unsigned int val, type, reg;
3464
3465 if (roff >= 3)
3466 val = bfd_get_8 (input_bfd, contents + roff - 3);
3467 else
3468 {
3469 if (roff < 2)
3470 goto corrupt_input;
3471 val = 0;
3472 }
3473 type = bfd_get_8 (input_bfd, contents + roff - 2);
3474 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3475 reg >>= 3;
3476 if (type == 0x8b)
3477 {
3478 /* movq */
3479 if (val == 0x4c)
3480 {
3481 if (roff < 3)
3482 goto corrupt_input;
3483 bfd_put_8 (output_bfd, 0x49,
3484 contents + roff - 3);
3485 }
3486 else if (!ABI_64_P (output_bfd) && val == 0x44)
3487 {
3488 if (roff < 3)
3489 goto corrupt_input;
3490 bfd_put_8 (output_bfd, 0x41,
3491 contents + roff - 3);
3492 }
3493 bfd_put_8 (output_bfd, 0xc7,
3494 contents + roff - 2);
3495 bfd_put_8 (output_bfd, 0xc0 | reg,
3496 contents + roff - 1);
3497 }
3498 else if (reg == 4)
3499 {
3500 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3501 is special */
3502 if (val == 0x4c)
3503 {
3504 if (roff < 3)
3505 goto corrupt_input;
3506 bfd_put_8 (output_bfd, 0x49,
3507 contents + roff - 3);
3508 }
3509 else if (!ABI_64_P (output_bfd) && val == 0x44)
3510 {
3511 if (roff < 3)
3512 goto corrupt_input;
3513 bfd_put_8 (output_bfd, 0x41,
3514 contents + roff - 3);
3515 }
3516 bfd_put_8 (output_bfd, 0x81,
3517 contents + roff - 2);
3518 bfd_put_8 (output_bfd, 0xc0 | reg,
3519 contents + roff - 1);
3520 }
3521 else
3522 {
3523 /* addq/addl -> leaq/leal */
3524 if (val == 0x4c)
3525 {
3526 if (roff < 3)
3527 goto corrupt_input;
3528 bfd_put_8 (output_bfd, 0x4d,
3529 contents + roff - 3);
3530 }
3531 else if (!ABI_64_P (output_bfd) && val == 0x44)
3532 {
3533 if (roff < 3)
3534 goto corrupt_input;
3535 bfd_put_8 (output_bfd, 0x45,
3536 contents + roff - 3);
3537 }
3538 bfd_put_8 (output_bfd, 0x8d,
3539 contents + roff - 2);
3540 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3541 contents + roff - 1);
3542 }
3543 bfd_put_32 (output_bfd,
3544 elf_x86_64_tpoff (info, relocation),
3545 contents + roff);
3546 continue;
3547 }
3548 else
3549 BFD_ASSERT (FALSE);
3550 }
3551
3552 if (htab->elf.sgot == NULL)
3553 abort ();
3554
3555 if (h != NULL)
3556 {
3557 off = h->got.offset;
3558 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3559 }
3560 else
3561 {
3562 if (local_got_offsets == NULL)
3563 abort ();
3564
3565 off = local_got_offsets[r_symndx];
3566 offplt = local_tlsdesc_gotents[r_symndx];
3567 }
3568
3569 if ((off & 1) != 0)
3570 off &= ~1;
3571 else
3572 {
3573 Elf_Internal_Rela outrel;
3574 int dr_type, indx;
3575 asection *sreloc;
3576
3577 if (htab->elf.srelgot == NULL)
3578 abort ();
3579
3580 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3581
3582 if (GOT_TLS_GDESC_P (tls_type))
3583 {
3584 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3585 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3586 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3587 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3588 + htab->elf.sgotplt->output_offset
3589 + offplt
3590 + htab->sgotplt_jump_table_size);
3591 sreloc = htab->elf.srelplt;
3592 if (indx == 0)
3593 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3594 else
3595 outrel.r_addend = 0;
3596 elf_append_rela (output_bfd, sreloc, &outrel);
3597 }
3598
3599 sreloc = htab->elf.srelgot;
3600
3601 outrel.r_offset = (htab->elf.sgot->output_section->vma
3602 + htab->elf.sgot->output_offset + off);
3603
3604 if (GOT_TLS_GD_P (tls_type))
3605 dr_type = R_X86_64_DTPMOD64;
3606 else if (GOT_TLS_GDESC_P (tls_type))
3607 goto dr_done;
3608 else
3609 dr_type = R_X86_64_TPOFF64;
3610
3611 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3612 outrel.r_addend = 0;
3613 if ((dr_type == R_X86_64_TPOFF64
3614 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3615 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3616 outrel.r_info = htab->r_info (indx, dr_type);
3617
3618 elf_append_rela (output_bfd, sreloc, &outrel);
3619
3620 if (GOT_TLS_GD_P (tls_type))
3621 {
3622 if (indx == 0)
3623 {
3624 BFD_ASSERT (! unresolved_reloc);
3625 bfd_put_64 (output_bfd,
3626 relocation - _bfd_x86_elf_dtpoff_base (info),
3627 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3628 }
3629 else
3630 {
3631 bfd_put_64 (output_bfd, 0,
3632 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3633 outrel.r_info = htab->r_info (indx,
3634 R_X86_64_DTPOFF64);
3635 outrel.r_offset += GOT_ENTRY_SIZE;
3636 elf_append_rela (output_bfd, sreloc,
3637 &outrel);
3638 }
3639 }
3640
3641 dr_done:
3642 if (h != NULL)
3643 h->got.offset |= 1;
3644 else
3645 local_got_offsets[r_symndx] |= 1;
3646 }
3647
3648 if (off >= (bfd_vma) -2
3649 && ! GOT_TLS_GDESC_P (tls_type))
3650 abort ();
3651 if (r_type_tls == r_type)
3652 {
3653 if (r_type == R_X86_64_GOTPC32_TLSDESC
3654 || r_type == R_X86_64_TLSDESC_CALL)
3655 relocation = htab->elf.sgotplt->output_section->vma
3656 + htab->elf.sgotplt->output_offset
3657 + offplt + htab->sgotplt_jump_table_size;
3658 else
3659 relocation = htab->elf.sgot->output_section->vma
3660 + htab->elf.sgot->output_offset + off;
3661 unresolved_reloc = FALSE;
3662 }
3663 else
3664 {
3665 bfd_vma roff = rel->r_offset;
3666
3667 if (r_type == R_X86_64_TLSGD)
3668 {
3669 /* GD->IE transition. For 64bit, change
3670 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3671 .word 0x6666; rex64; call __tls_get_addr@PLT
3672 or
3673 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3674 .byte 0x66; rex64
3675		     call *__tls_get_addr@GOTPCREL(%rip)
3676 which may be converted to
3677 addr32 call __tls_get_addr
3678 into:
3679 movq %fs:0, %rax
3680 addq foo@gottpoff(%rip), %rax
3681 For 32bit, change
3682 leaq foo@tlsgd(%rip), %rdi
3683 .word 0x6666; rex64; call __tls_get_addr@PLT
3684 or
3685 leaq foo@tlsgd(%rip), %rdi
3686 .byte 0x66; rex64;
3687 call *__tls_get_addr@GOTPCREL(%rip)
3688 which may be converted to
3689 addr32 call __tls_get_addr
3690 into:
3691 movl %fs:0, %eax
3692 addq foo@gottpoff(%rip), %rax
3693 For largepic, change:
3694 leaq foo@tlsgd(%rip), %rdi
3695 movabsq $__tls_get_addr@pltoff, %rax
3696 addq %r15, %rax
3697 call *%rax
3698 into:
3699 movq %fs:0, %rax
3700 		 addq foo@gottpoff(%rip), %rax
3701 nopw 0x0(%rax,%rax,1) */
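		    /* Illustrative derivation of the value patched below:
		       the 32-bit field at roff + 8 + largepic is the disp32
		       of the RIP-relative addq in the replacement code, i.e.

			 disp32 = GOT-entry address
				  - (address of the byte after the addq)
				= (sgot vma + off)
				  - (section vma + roff + 12 + largepic)

		       which is what 'relocation' computes below.  */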
3702 int largepic = 0;
3703 if (ABI_64_P (output_bfd))
3704 {
3705 if (contents[roff + 5] == 0xb8)
3706 {
3707 if (roff < 3
3708 || (roff - 3 + 22) > input_section->size)
3709 goto corrupt_input;
3710 memcpy (contents + roff - 3,
3711 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3712 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3713 largepic = 1;
3714 }
3715 else
3716 {
3717 if (roff < 4
3718 || (roff - 4 + 16) > input_section->size)
3719 goto corrupt_input;
3720 memcpy (contents + roff - 4,
3721 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3722 16);
3723 }
3724 }
3725 else
3726 {
3727 if (roff < 3
3728 || (roff - 3 + 15) > input_section->size)
3729 goto corrupt_input;
3730 memcpy (contents + roff - 3,
3731 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3732 15);
3733 }
3734
3735 relocation = (htab->elf.sgot->output_section->vma
3736 + htab->elf.sgot->output_offset + off
3737 - roff
3738 - largepic
3739 - input_section->output_section->vma
3740 - input_section->output_offset
3741 - 12);
3742 bfd_put_32 (output_bfd, relocation,
3743 contents + roff + 8 + largepic);
3744 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3745 rel++;
3746 wrel++;
3747 continue;
3748 }
3749 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3750 {
3751 /* GDesc -> IE transition.
3752 It's originally something like:
3753 leaq x@tlsdesc(%rip), %rax
3754
3755 Change it to:
3756 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3757
3758 /* Now modify the instruction as appropriate. To
3759 turn a leaq into a movq in the form we use it, it
3760 suffices to change the second byte from 0x8d to
3761 0x8b. */
3762 if (roff < 2)
3763 goto corrupt_input;
3764 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3765
3766 bfd_put_32 (output_bfd,
3767 htab->elf.sgot->output_section->vma
3768 + htab->elf.sgot->output_offset + off
3769 - rel->r_offset
3770 - input_section->output_section->vma
3771 - input_section->output_offset
3772 - 4,
3773 contents + roff);
3774 continue;
3775 }
3776 else if (r_type == R_X86_64_TLSDESC_CALL)
3777 {
3778 /* GDesc -> IE transition.
3779 It's originally:
3780 call *(%rax)
3781
3782 Change it to:
3783 xchg %ax, %ax. */
3784
3785 bfd_put_8 (output_bfd, 0x66, contents + roff);
3786 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3787 continue;
3788 }
3789 else
3790 BFD_ASSERT (FALSE);
3791 }
3792 break;
3793
3794 case R_X86_64_TLSLD:
3795 if (! elf_x86_64_tls_transition (info, input_bfd,
3796 input_section, contents,
3797 symtab_hdr, sym_hashes,
3798 &r_type, GOT_UNKNOWN, rel,
3799 relend, h, r_symndx, TRUE))
3800 return FALSE;
3801
3802 if (r_type != R_X86_64_TLSLD)
3803 {
3804 /* LD->LE transition:
3805 leaq foo@tlsld(%rip), %rdi
3806 call __tls_get_addr@PLT
3807 For 64bit, we change it into:
3808 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3809 For 32bit, we change it into:
3810 nopl 0x0(%rax); movl %fs:0, %eax
3811 Or
3812 leaq foo@tlsld(%rip), %rdi;
3813 call *__tls_get_addr@GOTPCREL(%rip)
3814 which may be converted to
3815 addr32 call __tls_get_addr
3816 For 64bit, we change it into:
3817 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3818 For 32bit, we change it into:
3819 nopw 0x0(%rax); movl %fs:0, %eax
3820 For largepic, change:
3821 		 leaq foo@tlsld(%rip), %rdi
3822 movabsq $__tls_get_addr@pltoff, %rax
3823 addq %rbx, %rax
3824 call *%rax
3825 into
3826 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3827 		 movq %fs:0, %rax */
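		    /* Note that each replacement sequence below is exactly
		       as long as the code it overwrites (12, 13 or 22 bytes
		       starting 3 bytes before the relocation offset), so no
		       other instruction in the section moves.  */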
3828
3829 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3830 if (ABI_64_P (output_bfd))
3831 {
3832 if ((rel->r_offset + 5) >= input_section->size)
3833 goto corrupt_input;
3834 if (contents[rel->r_offset + 5] == 0xb8)
3835 {
3836 if (rel->r_offset < 3
3837 || (rel->r_offset - 3 + 22) > input_section->size)
3838 goto corrupt_input;
3839 memcpy (contents + rel->r_offset - 3,
3840 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3841 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3842 }
3843 else if (contents[rel->r_offset + 4] == 0xff
3844 || contents[rel->r_offset + 4] == 0x67)
3845 {
3846 if (rel->r_offset < 3
3847 || (rel->r_offset - 3 + 13) > input_section->size)
3848 goto corrupt_input;
3849 memcpy (contents + rel->r_offset - 3,
3850 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3851 13);
3852
3853 }
3854 else
3855 {
3856 if (rel->r_offset < 3
3857 || (rel->r_offset - 3 + 12) > input_section->size)
3858 goto corrupt_input;
3859 memcpy (contents + rel->r_offset - 3,
3860 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3861 }
3862 }
3863 else
3864 {
3865 if ((rel->r_offset + 4) >= input_section->size)
3866 goto corrupt_input;
3867 if (contents[rel->r_offset + 4] == 0xff)
3868 {
3869 if (rel->r_offset < 3
3870 || (rel->r_offset - 3 + 13) > input_section->size)
3871 goto corrupt_input;
3872 memcpy (contents + rel->r_offset - 3,
3873 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3874 13);
3875 }
3876 else
3877 {
3878 if (rel->r_offset < 3
3879 || (rel->r_offset - 3 + 12) > input_section->size)
3880 goto corrupt_input;
3881 memcpy (contents + rel->r_offset - 3,
3882 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3883 }
3884 }
3885 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3886 and R_X86_64_PLTOFF64. */
3887 rel++;
3888 wrel++;
3889 continue;
3890 }
3891
3892 if (htab->elf.sgot == NULL)
3893 abort ();
3894
3895 off = htab->tls_ld_or_ldm_got.offset;
3896 if (off & 1)
3897 off &= ~1;
3898 else
3899 {
3900 Elf_Internal_Rela outrel;
3901
3902 if (htab->elf.srelgot == NULL)
3903 abort ();
3904
3905 outrel.r_offset = (htab->elf.sgot->output_section->vma
3906 + htab->elf.sgot->output_offset + off);
3907
3908 bfd_put_64 (output_bfd, 0,
3909 htab->elf.sgot->contents + off);
3910 bfd_put_64 (output_bfd, 0,
3911 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3912 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3913 outrel.r_addend = 0;
3914 elf_append_rela (output_bfd, htab->elf.srelgot,
3915 &outrel);
3916 htab->tls_ld_or_ldm_got.offset |= 1;
3917 }
3918 relocation = htab->elf.sgot->output_section->vma
3919 + htab->elf.sgot->output_offset + off;
3920 unresolved_reloc = FALSE;
3921 break;
3922
3923 case R_X86_64_DTPOFF32:
3924 if (!bfd_link_executable (info)
3925 || (input_section->flags & SEC_CODE) == 0)
3926 relocation -= _bfd_x86_elf_dtpoff_base (info);
3927 else
3928 relocation = elf_x86_64_tpoff (info, relocation);
3929 break;
3930
3931 case R_X86_64_TPOFF32:
3932 case R_X86_64_TPOFF64:
3933 BFD_ASSERT (bfd_link_executable (info));
3934 relocation = elf_x86_64_tpoff (info, relocation);
3935 break;
3936
3937 case R_X86_64_DTPOFF64:
3938 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3939 relocation -= _bfd_x86_elf_dtpoff_base (info);
3940 break;
3941
3942 default:
3943 break;
3944 }
3945
3946 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3947 because such sections are not SEC_ALLOC and thus ld.so will
3948 not process them. */
3949 if (unresolved_reloc
3950 && !((input_section->flags & SEC_DEBUGGING) != 0
3951 && h->def_dynamic)
3952 && _bfd_elf_section_offset (output_bfd, info, input_section,
3953 rel->r_offset) != (bfd_vma) -1)
3954 {
3955 switch (r_type)
3956 {
3957 case R_X86_64_32S:
3958 sec = h->root.u.def.section;
3959 if ((info->nocopyreloc
3960 || (eh->def_protected
3961 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3962 && !(h->root.u.def.section->flags & SEC_CODE))
3963 return elf_x86_64_need_pic (info, input_bfd, input_section,
3964 h, NULL, NULL, howto);
3965 /* Fall through. */
3966
3967 default:
3968 _bfd_error_handler
3969 /* xgettext:c-format */
3970 (_("%pB(%pA+%#" PRIx64 "): "
3971 "unresolvable %s relocation against symbol `%s'"),
3972 input_bfd,
3973 input_section,
3974 (uint64_t) rel->r_offset,
3975 howto->name,
3976 h->root.root.string);
3977 return FALSE;
3978 }
3979 }
3980
3981 do_relocation:
3982 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3983 contents, rel->r_offset,
3984 relocation, rel->r_addend);
3985
3986 check_relocation_error:
3987 if (r != bfd_reloc_ok)
3988 {
3989 const char *name;
3990
3991 if (h != NULL)
3992 name = h->root.root.string;
3993 else
3994 {
3995 name = bfd_elf_string_from_elf_section (input_bfd,
3996 symtab_hdr->sh_link,
3997 sym->st_name);
3998 if (name == NULL)
3999 return FALSE;
4000 if (*name == '\0')
4001 name = bfd_section_name (input_bfd, sec);
4002 }
4003
4004 if (r == bfd_reloc_overflow)
4005 {
4006 if (converted_reloc)
4007 {
4008 info->callbacks->einfo
4009 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4010 return FALSE;
4011 }
4012 (*info->callbacks->reloc_overflow)
4013 (info, (h ? &h->root : NULL), name, howto->name,
4014 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4015 }
4016 else
4017 {
4018 _bfd_error_handler
4019 /* xgettext:c-format */
4020 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4021 input_bfd, input_section,
4022 (uint64_t) rel->r_offset, name, (int) r);
4023 return FALSE;
4024 }
4025 }
4026
4027 if (wrel != rel)
4028 *wrel = *rel;
4029 }
4030
4031 if (wrel != rel)
4032 {
4033 Elf_Internal_Shdr *rel_hdr;
4034 size_t deleted = rel - wrel;
4035
4036 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4037 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4038 if (rel_hdr->sh_size == 0)
4039 {
4040 /* It is too late to remove an empty reloc section. Leave
4041 one NONE reloc.
4042 ??? What is wrong with an empty section??? */
4043 rel_hdr->sh_size = rel_hdr->sh_entsize;
4044 deleted -= 1;
4045 }
4046 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4047 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4048 input_section->reloc_count -= deleted;
4049 }
4050
4051 return TRUE;
4052 }
4053
4054 /* Finish up dynamic symbol handling. We set the contents of various
4055 dynamic sections here. */
4056
4057 static bfd_boolean
4058 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4059 struct bfd_link_info *info,
4060 struct elf_link_hash_entry *h,
4061 Elf_Internal_Sym *sym)
4062 {
4063 struct elf_x86_link_hash_table *htab;
4064 bfd_boolean use_plt_second;
4065 struct elf_x86_link_hash_entry *eh;
4066 bfd_boolean local_undefweak;
4067
4068 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4069 if (htab == NULL)
4070 return FALSE;
4071
4072   /* Use the second PLT section only if there is a .plt section.  */
4073 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4074
4075 eh = (struct elf_x86_link_hash_entry *) h;
4076 if (eh->no_finish_dynamic_symbol)
4077 abort ();
4078
4079   /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4080      resolved undefined weak symbols in an executable so that their
4081      references have value 0 at run-time.  */
4082 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4083
4084 if (h->plt.offset != (bfd_vma) -1)
4085 {
4086 bfd_vma plt_index;
4087 bfd_vma got_offset, plt_offset;
4088 Elf_Internal_Rela rela;
4089 bfd_byte *loc;
4090 asection *plt, *gotplt, *relplt, *resolved_plt;
4091 const struct elf_backend_data *bed;
4092 bfd_vma plt_got_pcrel_offset;
4093
4094 /* When building a static executable, use .iplt, .igot.plt and
4095 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4096 if (htab->elf.splt != NULL)
4097 {
4098 plt = htab->elf.splt;
4099 gotplt = htab->elf.sgotplt;
4100 relplt = htab->elf.srelplt;
4101 }
4102 else
4103 {
4104 plt = htab->elf.iplt;
4105 gotplt = htab->elf.igotplt;
4106 relplt = htab->elf.irelplt;
4107 }
4108
4109 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4110
4111 /* Get the index in the procedure linkage table which
4112 corresponds to this symbol. This is the index of this symbol
4113 in all the symbols for which we are making plt entries. The
4114 first entry in the procedure linkage table is reserved.
4115
4116 Get the offset into the .got table of the entry that
4117 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4118 bytes. The first three are reserved for the dynamic linker.
4119
4120 For static executables, we don't reserve anything. */
4121
4122 if (plt == htab->elf.splt)
4123 {
4124 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4125 - htab->plt.has_plt0);
4126 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4127 }
4128 else
4129 {
4130 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4131 got_offset = got_offset * GOT_ENTRY_SIZE;
4132 }
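      /* Worked example, assuming the standard lazy PLT layout (16-byte
	 entries, PLT0 present) and 8-byte GOT entries: the entry at
	 h->plt.offset == 0x20 is PLT index 1 and gets GOT offset
	 (1 + 3) * 8 == 32, i.e. the slot after the three reserved
	 .got.plt entries and the slot of PLT index 0.  */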
4133
4134 /* Fill in the entry in the procedure linkage table. */
4135 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4136 htab->plt.plt_entry_size);
4137 if (use_plt_second)
4138 {
4139 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4140 htab->non_lazy_plt->plt_entry,
4141 htab->non_lazy_plt->plt_entry_size);
4142
4143 resolved_plt = htab->plt_second;
4144 plt_offset = eh->plt_second.offset;
4145 }
4146 else
4147 {
4148 resolved_plt = plt;
4149 plt_offset = h->plt.offset;
4150 }
4151
4152 /* Insert the relocation positions of the plt section. */
4153
4154 	  /* Store the PC-relative offset to the GOT entry, measured from
4155 	     the end of the instruction that refers to it.  */
4156 plt_got_pcrel_offset = (gotplt->output_section->vma
4157 + gotplt->output_offset
4158 + got_offset
4159 - resolved_plt->output_section->vma
4160 - resolved_plt->output_offset
4161 - plt_offset
4162 - htab->plt.plt_got_insn_size);
4163
4164 /* Check PC-relative offset overflow in PLT entry. */
4165 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4166 /* xgettext:c-format */
4167 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4168 output_bfd, h->root.root.string);
4169
4170 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4171 (resolved_plt->contents + plt_offset
4172 + htab->plt.plt_got_offset));
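      /* Illustrative example with made-up addresses: for a PLT entry at
	 0x401020, a .got.plt slot at 0x404018 and a 6-byte GOT-referencing
	 instruction, the value stored above is
	 0x404018 - 0x401020 - 6 == 0x2ff2.  */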
4173
4174 	  /* Fill in the entry in the global offset table; initially it
4175 	     points to the second part of the PLT entry.  Leave the entry
4176 	     as zero for an undefined weak symbol in PIE, and emit no PLT
4177 	     relocation against it.  */
4178 if (!local_undefweak)
4179 {
4180 if (htab->plt.has_plt0)
4181 bfd_put_64 (output_bfd, (plt->output_section->vma
4182 + plt->output_offset
4183 + h->plt.offset
4184 + htab->lazy_plt->plt_lazy_offset),
4185 gotplt->contents + got_offset);
4186
4187 /* Fill in the entry in the .rela.plt section. */
4188 rela.r_offset = (gotplt->output_section->vma
4189 + gotplt->output_offset
4190 + got_offset);
4191 if (PLT_LOCAL_IFUNC_P (info, h))
4192 {
4193 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4194 h->root.root.string,
4195 h->root.u.def.section->owner);
4196
4197 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4198 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4199 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4200 rela.r_addend = (h->root.u.def.value
4201 + h->root.u.def.section->output_section->vma
4202 + h->root.u.def.section->output_offset);
4203 /* R_X86_64_IRELATIVE comes last. */
4204 plt_index = htab->next_irelative_index--;
4205 }
4206 else
4207 {
4208 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4209 rela.r_addend = 0;
4210 plt_index = htab->next_jump_slot_index++;
4211 }
4212
4213 	      /* Don't fill in the second and third slots of the PLT entry
4214 		 for static executables or when there is no PLT0.  */
4215 if (plt == htab->elf.splt && htab->plt.has_plt0)
4216 {
4217 bfd_vma plt0_offset
4218 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4219
4220 /* Put relocation index. */
4221 bfd_put_32 (output_bfd, plt_index,
4222 (plt->contents + h->plt.offset
4223 + htab->lazy_plt->plt_reloc_offset));
4224
4225 	      /* Put the offset for the jmp to .PLT0 and check for overflow.
4226 		 We don't check the relocation index for overflow since the
4227 		 branch displacement will overflow first.  */
4228 if (plt0_offset > 0x80000000)
4229 /* xgettext:c-format */
4230 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4231 output_bfd, h->root.root.string);
4232 bfd_put_32 (output_bfd, - plt0_offset,
4233 (plt->contents + h->plt.offset
4234 + htab->lazy_plt->plt_plt_offset));
4235 }
4236
4237 bed = get_elf_backend_data (output_bfd);
4238 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4239 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4240 }
4241 }
4242 else if (eh->plt_got.offset != (bfd_vma) -1)
4243 {
4244 bfd_vma got_offset, plt_offset;
4245 asection *plt, *got;
4246 bfd_boolean got_after_plt;
4247 int32_t got_pcrel_offset;
4248
4249 /* Set the entry in the GOT procedure linkage table. */
4250 plt = htab->plt_got;
4251 got = htab->elf.sgot;
4252 got_offset = h->got.offset;
4253
4254 if (got_offset == (bfd_vma) -1
4255 || (h->type == STT_GNU_IFUNC && h->def_regular)
4256 || plt == NULL
4257 || got == NULL)
4258 abort ();
4259
4260       /* Use the non-lazy PLT entry template for the GOT PLT since they
4261 	 are identical.  */
4262 /* Fill in the entry in the GOT procedure linkage table. */
4263 plt_offset = eh->plt_got.offset;
4264 memcpy (plt->contents + plt_offset,
4265 htab->non_lazy_plt->plt_entry,
4266 htab->non_lazy_plt->plt_entry_size);
4267
4268       /* Store the PC-relative offset to the GOT entry, measured from
4269 	 the end of the instruction that refers to it.  */
4270 got_pcrel_offset = (got->output_section->vma
4271 + got->output_offset
4272 + got_offset
4273 - plt->output_section->vma
4274 - plt->output_offset
4275 - plt_offset
4276 - htab->non_lazy_plt->plt_got_insn_size);
4277
4278 /* Check PC-relative offset overflow in GOT PLT entry. */
4279 got_after_plt = got->output_section->vma > plt->output_section->vma;
4280 if ((got_after_plt && got_pcrel_offset < 0)
4281 || (!got_after_plt && got_pcrel_offset > 0))
4282 /* xgettext:c-format */
4283 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4284 output_bfd, h->root.root.string);
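      /* got_pcrel_offset is an int32_t; if the true displacement does not
	 fit in 32 bits, the truncated value has the wrong sign for the
	 known section order, which is what the check above detects.  */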
4285
4286 bfd_put_32 (output_bfd, got_pcrel_offset,
4287 (plt->contents + plt_offset
4288 + htab->non_lazy_plt->plt_got_offset));
4289 }
4290
4291 if (!local_undefweak
4292 && !h->def_regular
4293 && (h->plt.offset != (bfd_vma) -1
4294 || eh->plt_got.offset != (bfd_vma) -1))
4295 {
4296 /* Mark the symbol as undefined, rather than as defined in
4297 the .plt section. Leave the value if there were any
4298 relocations where pointer equality matters (this is a clue
4299 for the dynamic linker, to make function pointer
4300 comparisons work between an application and shared
4301 library), otherwise set it to zero. If a function is only
4302 called from a binary, there is no need to slow down
4303 shared libraries because of that. */
4304 sym->st_shndx = SHN_UNDEF;
4305 if (!h->pointer_equality_needed)
4306 sym->st_value = 0;
4307 }
4308
4309 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4310
4311 /* Don't generate dynamic GOT relocation against undefined weak
4312 symbol in executable. */
4313 if (h->got.offset != (bfd_vma) -1
4314 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4315 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4316 && !local_undefweak)
4317 {
4318 Elf_Internal_Rela rela;
4319 asection *relgot = htab->elf.srelgot;
4320
4321 /* This symbol has an entry in the global offset table. Set it
4322 up. */
4323 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4324 abort ();
4325
4326 rela.r_offset = (htab->elf.sgot->output_section->vma
4327 + htab->elf.sgot->output_offset
4328 + (h->got.offset &~ (bfd_vma) 1));
4329
4330 /* If this is a static link, or it is a -Bsymbolic link and the
4331 symbol is defined locally or was forced to be local because
4332 of a version file, we just want to emit a RELATIVE reloc.
4333 The entry in the global offset table will already have been
4334 initialized in the relocate_section function. */
4335 if (h->def_regular
4336 && h->type == STT_GNU_IFUNC)
4337 {
4338 if (h->plt.offset == (bfd_vma) -1)
4339 {
4340 /* STT_GNU_IFUNC is referenced without PLT. */
4341 if (htab->elf.splt == NULL)
4342 {
4343 		  /* Use the .rel[a].iplt section to store .got relocations
4344 		     in a static executable.  */
4345 relgot = htab->elf.irelplt;
4346 }
4347 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4348 {
4349 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4350 h->root.root.string,
4351 h->root.u.def.section->owner);
4352
4353 rela.r_info = htab->r_info (0,
4354 R_X86_64_IRELATIVE);
4355 rela.r_addend = (h->root.u.def.value
4356 + h->root.u.def.section->output_section->vma
4357 + h->root.u.def.section->output_offset);
4358 }
4359 else
4360 goto do_glob_dat;
4361 }
4362 else if (bfd_link_pic (info))
4363 {
4364 /* Generate R_X86_64_GLOB_DAT. */
4365 goto do_glob_dat;
4366 }
4367 else
4368 {
4369 asection *plt;
4370 bfd_vma plt_offset;
4371
4372 if (!h->pointer_equality_needed)
4373 abort ();
4374
4375 	      /* For a non-shared object we can't use .got.plt, which holds
4376 		 the real function address, if we need pointer equality.
4377 		 Load the GOT entry with the PLT entry address instead.  */
4378 if (htab->plt_second != NULL)
4379 {
4380 plt = htab->plt_second;
4381 plt_offset = eh->plt_second.offset;
4382 }
4383 else
4384 {
4385 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4386 plt_offset = h->plt.offset;
4387 }
4388 bfd_put_64 (output_bfd, (plt->output_section->vma
4389 + plt->output_offset
4390 + plt_offset),
4391 htab->elf.sgot->contents + h->got.offset);
4392 return TRUE;
4393 }
4394 }
4395 else if (bfd_link_pic (info)
4396 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4397 {
4398 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4399 return FALSE;
4400 BFD_ASSERT((h->got.offset & 1) != 0);
4401 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4402 rela.r_addend = (h->root.u.def.value
4403 + h->root.u.def.section->output_section->vma
4404 + h->root.u.def.section->output_offset);
4405 }
4406 else
4407 {
4408 BFD_ASSERT((h->got.offset & 1) == 0);
4409 do_glob_dat:
4410 bfd_put_64 (output_bfd, (bfd_vma) 0,
4411 htab->elf.sgot->contents + h->got.offset);
4412 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4413 rela.r_addend = 0;
4414 }
4415
4416 elf_append_rela (output_bfd, relgot, &rela);
4417 }
4418
4419 if (h->needs_copy)
4420 {
4421 Elf_Internal_Rela rela;
4422 asection *s;
4423
4424 /* This symbol needs a copy reloc. Set it up. */
4425 VERIFY_COPY_RELOC (h, htab)
4426
4427 rela.r_offset = (h->root.u.def.value
4428 + h->root.u.def.section->output_section->vma
4429 + h->root.u.def.section->output_offset);
4430 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4431 rela.r_addend = 0;
4432 if (h->root.u.def.section == htab->elf.sdynrelro)
4433 s = htab->elf.sreldynrelro;
4434 else
4435 s = htab->elf.srelbss;
4436 elf_append_rela (output_bfd, s, &rela);
4437 }
4438
4439 return TRUE;
4440 }
4441
4442 /* Finish up local dynamic symbol handling. We set the contents of
4443 various dynamic sections here. */
4444
4445 static bfd_boolean
4446 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4447 {
4448 struct elf_link_hash_entry *h
4449 = (struct elf_link_hash_entry *) *slot;
4450 struct bfd_link_info *info
4451 = (struct bfd_link_info *) inf;
4452
4453 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4454 info, h, NULL);
4455 }
4456
4457 /* Finish up undefined weak symbol handling in PIE.  Fill in its PLT
4458    entry here, since an undefined weak symbol may not be dynamic and
4459    elf_x86_64_finish_dynamic_symbol may not be called for it.  */
4460
4461 static bfd_boolean
4462 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4463 void *inf)
4464 {
4465 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4466 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4467
4468 if (h->root.type != bfd_link_hash_undefweak
4469 || h->dynindx != -1)
4470 return TRUE;
4471
4472 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4473 info, h, NULL);
4474 }
4475
4476 /* Used to decide how to sort relocs in an optimal manner for the
4477 dynamic linker, before writing them out. */
4478
4479 static enum elf_reloc_type_class
4480 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4481 const asection *rel_sec ATTRIBUTE_UNUSED,
4482 const Elf_Internal_Rela *rela)
4483 {
4484 bfd *abfd = info->output_bfd;
4485 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4486 struct elf_x86_link_hash_table *htab
4487 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4488
4489 if (htab->elf.dynsym != NULL
4490 && htab->elf.dynsym->contents != NULL)
4491 {
4492 /* Check relocation against STT_GNU_IFUNC symbol if there are
4493 dynamic symbols. */
4494 unsigned long r_symndx = htab->r_sym (rela->r_info);
4495 if (r_symndx != STN_UNDEF)
4496 {
4497 Elf_Internal_Sym sym;
4498 if (!bed->s->swap_symbol_in (abfd,
4499 (htab->elf.dynsym->contents
4500 + r_symndx * bed->s->sizeof_sym),
4501 0, &sym))
4502 abort ();
4503
4504 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4505 return reloc_class_ifunc;
4506 }
4507 }
4508
4509 switch ((int) ELF32_R_TYPE (rela->r_info))
4510 {
4511 case R_X86_64_IRELATIVE:
4512 return reloc_class_ifunc;
4513 case R_X86_64_RELATIVE:
4514 case R_X86_64_RELATIVE64:
4515 return reloc_class_relative;
4516 case R_X86_64_JUMP_SLOT:
4517 return reloc_class_plt;
4518 case R_X86_64_COPY:
4519 return reloc_class_copy;
4520 default:
4521 return reloc_class_normal;
4522 }
4523 }
4524
4525 /* Finish up the dynamic sections. */
4526
4527 static bfd_boolean
4528 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4529 struct bfd_link_info *info)
4530 {
4531 struct elf_x86_link_hash_table *htab;
4532
4533 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4534 if (htab == NULL)
4535 return FALSE;
4536
4537 if (! htab->elf.dynamic_sections_created)
4538 return TRUE;
4539
4540 if (htab->elf.splt && htab->elf.splt->size > 0)
4541 {
4542 elf_section_data (htab->elf.splt->output_section)
4543 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4544
4545 if (htab->plt.has_plt0)
4546 {
4547 /* Fill in the special first entry in the procedure linkage
4548 table. */
4549 memcpy (htab->elf.splt->contents,
4550 htab->lazy_plt->plt0_entry,
4551 htab->lazy_plt->plt0_entry_size);
4552 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
4553 	     uses 6 bytes, subtract this value.  */
4554 bfd_put_32 (output_bfd,
4555 (htab->elf.sgotplt->output_section->vma
4556 + htab->elf.sgotplt->output_offset
4557 + 8
4558 - htab->elf.splt->output_section->vma
4559 - htab->elf.splt->output_offset
4560 - 6),
4561 (htab->elf.splt->contents
4562 + htab->lazy_plt->plt0_got1_offset));
4563 /* Add offset for the PC-relative instruction accessing
4564 GOT+16, subtracting the offset to the end of that
4565 instruction. */
4566 bfd_put_32 (output_bfd,
4567 (htab->elf.sgotplt->output_section->vma
4568 + htab->elf.sgotplt->output_offset
4569 + 16
4570 - htab->elf.splt->output_section->vma
4571 - htab->elf.splt->output_offset
4572 - htab->lazy_plt->plt0_got2_insn_end),
4573 (htab->elf.splt->contents
4574 + htab->lazy_plt->plt0_got2_offset));
4575 }
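	  /* With the standard lazy PLT0 (pushq GOT+8(%rip);
	     jmpq *GOT+16(%rip); padding), the first displacement lives at
	     byte 2 and is relative to the end of the 6-byte pushq, and the
	     second is relative to plt0_got2_insn_end bytes into PLT0,
	     matching the '- 6' and plt0_got2_insn_end terms above.  */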
4576
4577 if (htab->tlsdesc_plt)
4578 {
4579 bfd_put_64 (output_bfd, (bfd_vma) 0,
4580 htab->elf.sgot->contents + htab->tlsdesc_got);
4581
4582 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4583 htab->lazy_plt->plt_tlsdesc_entry,
4584 htab->lazy_plt->plt_tlsdesc_entry_size);
4585
4586 	  /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4587 	     bytes and the instruction uses 6 bytes, subtract these
4588 	     values.  */
4589 bfd_put_32 (output_bfd,
4590 (htab->elf.sgotplt->output_section->vma
4591 + htab->elf.sgotplt->output_offset
4592 + 8
4593 - htab->elf.splt->output_section->vma
4594 - htab->elf.splt->output_offset
4595 - htab->tlsdesc_plt
4596 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4597 (htab->elf.splt->contents
4598 + htab->tlsdesc_plt
4599 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4600 /* Add offset for indirect branch via GOT+TDG, where TDG
4601 stands for htab->tlsdesc_got, subtracting the offset
4602 to the end of that instruction. */
4603 bfd_put_32 (output_bfd,
4604 (htab->elf.sgot->output_section->vma
4605 + htab->elf.sgot->output_offset
4606 + htab->tlsdesc_got
4607 - htab->elf.splt->output_section->vma
4608 - htab->elf.splt->output_offset
4609 - htab->tlsdesc_plt
4610 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4611 (htab->elf.splt->contents
4612 + htab->tlsdesc_plt
4613 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4614 }
4615 }
4616
4617 /* Fill PLT entries for undefined weak symbols in PIE. */
4618 if (bfd_link_pie (info))
4619 bfd_hash_traverse (&info->hash->table,
4620 elf_x86_64_pie_finish_undefweak_symbol,
4621 info);
4622
4623 return TRUE;
4624 }
4625
4626 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4627 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4628 It has to be done before elf_link_sort_relocs is called so that
4629 dynamic relocations are properly sorted. */
4630
4631 static bfd_boolean
4632 elf_x86_64_output_arch_local_syms
4633 (bfd *output_bfd ATTRIBUTE_UNUSED,
4634 struct bfd_link_info *info,
4635 void *flaginfo ATTRIBUTE_UNUSED,
4636 int (*func) (void *, const char *,
4637 Elf_Internal_Sym *,
4638 asection *,
4639 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4640 {
4641 struct elf_x86_link_hash_table *htab
4642 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4643 if (htab == NULL)
4644 return FALSE;
4645
4646 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4647 htab_traverse (htab->loc_hash_table,
4648 elf_x86_64_finish_local_dynamic_symbol,
4649 info);
4650
4651 return TRUE;
4652 }
4653
4654 /* Forward declaration. */
4655 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4656
4657 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4658 dynamic relocations. */
4659
4660 static long
4661 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4662 long symcount ATTRIBUTE_UNUSED,
4663 asymbol **syms ATTRIBUTE_UNUSED,
4664 long dynsymcount,
4665 asymbol **dynsyms,
4666 asymbol **ret)
4667 {
4668 long count, i, n;
4669 int j;
4670 bfd_byte *plt_contents;
4671 long relsize;
4672 const struct elf_x86_lazy_plt_layout *lazy_plt;
4673 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4674 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4675 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4676 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4677 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4678 asection *plt;
4679 enum elf_x86_plt_type plt_type;
4680 struct elf_x86_plt plts[] =
4681 {
4682 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4683 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4684 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4685 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4686 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4687 };
4688
4689 *ret = NULL;
4690
4691 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4692 return 0;
4693
4694 if (dynsymcount <= 0)
4695 return 0;
4696
4697 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4698 if (relsize <= 0)
4699 return -1;
4700
4701 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4702 {
4703 lazy_plt = &elf_x86_64_lazy_plt;
4704 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4705 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4706 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4707 if (ABI_64_P (abfd))
4708 {
4709 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4710 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4711 }
4712 else
4713 {
4714 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4715 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4716 }
4717 }
4718 else
4719 {
4720 lazy_plt = &elf_x86_64_nacl_plt;
4721 non_lazy_plt = NULL;
4722 lazy_bnd_plt = NULL;
4723 non_lazy_bnd_plt = NULL;
4724 lazy_ibt_plt = NULL;
4725 non_lazy_ibt_plt = NULL;
4726 }
4727
4728 count = 0;
4729 for (j = 0; plts[j].name != NULL; j++)
4730 {
4731 plt = bfd_get_section_by_name (abfd, plts[j].name);
4732 if (plt == NULL || plt->size == 0)
4733 continue;
4734
4735 /* Get the PLT section contents. */
4736 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4737 if (plt_contents == NULL)
4738 break;
4739 if (!bfd_get_section_contents (abfd, (asection *) plt,
4740 plt_contents, 0, plt->size))
4741 {
4742 free (plt_contents);
4743 break;
4744 }
4745
4746 /* Check what kind of PLT it is. */
4747 plt_type = plt_unknown;
4748 if (plts[j].type == plt_unknown
4749 && (plt->size >= (lazy_plt->plt_entry_size
4750 + lazy_plt->plt_entry_size)))
4751 {
4752 /* Match lazy PLT first. Need to check the first two
4753 instructions. */
4754 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4755 lazy_plt->plt0_got1_offset) == 0)
4756 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4757 2) == 0))
4758 plt_type = plt_lazy;
4759 else if (lazy_bnd_plt != NULL
4760 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4761 lazy_bnd_plt->plt0_got1_offset) == 0)
4762 && (memcmp (plt_contents + 6,
4763 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4764 {
4765 plt_type = plt_lazy | plt_second;
4766 	      /* The first entry in the lazy IBT PLT is the same as in
4767 		 the lazy BND PLT.  */
4768 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4769 lazy_ibt_plt->plt_entry,
4770 lazy_ibt_plt->plt_got_offset) == 0))
4771 lazy_plt = lazy_ibt_plt;
4772 else
4773 lazy_plt = lazy_bnd_plt;
4774 }
4775 }
4776
4777 if (non_lazy_plt != NULL
4778 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4779 && plt->size >= non_lazy_plt->plt_entry_size)
4780 {
4781 /* Match non-lazy PLT. */
4782 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4783 non_lazy_plt->plt_got_offset) == 0)
4784 plt_type = plt_non_lazy;
4785 }
4786
4787 if (plt_type == plt_unknown || plt_type == plt_second)
4788 {
4789 if (non_lazy_bnd_plt != NULL
4790 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4791 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4792 non_lazy_bnd_plt->plt_got_offset) == 0))
4793 {
4794 /* Match BND PLT. */
4795 plt_type = plt_second;
4796 non_lazy_plt = non_lazy_bnd_plt;
4797 }
4798 else if (non_lazy_ibt_plt != NULL
4799 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4800 && (memcmp (plt_contents,
4801 non_lazy_ibt_plt->plt_entry,
4802 non_lazy_ibt_plt->plt_got_offset) == 0))
4803 {
4804 /* Match IBT PLT. */
4805 plt_type = plt_second;
4806 non_lazy_plt = non_lazy_ibt_plt;
4807 }
4808 }
4809
4810 if (plt_type == plt_unknown)
4811 {
4812 free (plt_contents);
4813 continue;
4814 }
4815
4816 plts[j].sec = plt;
4817 plts[j].type = plt_type;
4818
4819 if ((plt_type & plt_lazy))
4820 {
4821 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4822 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4823 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4824 /* Skip PLT0 in lazy PLT. */
4825 i = 1;
4826 }
4827 else
4828 {
4829 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4830 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4831 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4832 i = 0;
4833 }
4834
4835 /* Skip lazy PLT when the second PLT is used. */
4836 if (plt_type == (plt_lazy | plt_second))
4837 plts[j].count = 0;
4838 else
4839 {
4840 n = plt->size / plts[j].plt_entry_size;
4841 plts[j].count = n;
4842 count += n - i;
4843 }
4844
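	  /* For instance, a lazy .plt of 0x50 bytes with 16-byte entries
	     gives n == 5; entry 0 is PLT0 (i == 1 above), so four synthetic
	     symbols are produced for this section.  */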
4845 plts[j].contents = plt_contents;
4846 }
4847
4848 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4849 (bfd_vma) 0, plts, dynsyms,
4850 ret);
4851 }
4852
4853 /* Handle an x86-64 specific section when reading an object file. This
4854 is called when elfcode.h finds a section with an unknown type. */
4855
4856 static bfd_boolean
4857 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4858 const char *name, int shindex)
4859 {
4860 if (hdr->sh_type != SHT_X86_64_UNWIND)
4861 return FALSE;
4862
4863 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4864 return FALSE;
4865
4866 return TRUE;
4867 }
4868
4869 /* Hook called by the linker routine which adds symbols from an object
4870 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4871 of .bss. */
4872
4873 static bfd_boolean
4874 elf_x86_64_add_symbol_hook (bfd *abfd,
4875 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4876 Elf_Internal_Sym *sym,
4877 const char **namep ATTRIBUTE_UNUSED,
4878 flagword *flagsp ATTRIBUTE_UNUSED,
4879 asection **secp,
4880 bfd_vma *valp)
4881 {
4882 asection *lcomm;
4883
4884 switch (sym->st_shndx)
4885 {
4886 case SHN_X86_64_LCOMMON:
4887 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4888 if (lcomm == NULL)
4889 {
4890 lcomm = bfd_make_section_with_flags (abfd,
4891 "LARGE_COMMON",
4892 (SEC_ALLOC
4893 | SEC_IS_COMMON
4894 | SEC_LINKER_CREATED));
4895 if (lcomm == NULL)
4896 return FALSE;
4897 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4898 }
4899 *secp = lcomm;
4900 *valp = sym->st_size;
4901 return TRUE;
4902 }
4903
4904 return TRUE;
4905 }
4906
4907
4908 /* Given a BFD section, try to locate the corresponding ELF section
4909 index. */
4910
4911 static bfd_boolean
4912 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4913 asection *sec, int *index_return)
4914 {
4915 if (sec == &_bfd_elf_large_com_section)
4916 {
4917 *index_return = SHN_X86_64_LCOMMON;
4918 return TRUE;
4919 }
4920 return FALSE;
4921 }
4922
4923 /* Process a symbol. */
4924
4925 static void
4926 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4927 asymbol *asym)
4928 {
4929 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4930
4931 switch (elfsym->internal_elf_sym.st_shndx)
4932 {
4933 case SHN_X86_64_LCOMMON:
4934 asym->section = &_bfd_elf_large_com_section;
4935 asym->value = elfsym->internal_elf_sym.st_size;
4936 /* Common symbol doesn't set BSF_GLOBAL. */
4937 asym->flags &= ~BSF_GLOBAL;
4938 break;
4939 }
4940 }
4941
4942 static bfd_boolean
4943 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4944 {
4945 return (sym->st_shndx == SHN_COMMON
4946 || sym->st_shndx == SHN_X86_64_LCOMMON);
4947 }
4948
4949 static unsigned int
4950 elf_x86_64_common_section_index (asection *sec)
4951 {
4952 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4953 return SHN_COMMON;
4954 else
4955 return SHN_X86_64_LCOMMON;
4956 }
4957
4958 static asection *
4959 elf_x86_64_common_section (asection *sec)
4960 {
4961 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4962 return bfd_com_section_ptr;
4963 else
4964 return &_bfd_elf_large_com_section;
4965 }
4966
4967 static bfd_boolean
4968 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4969 const Elf_Internal_Sym *sym,
4970 asection **psec,
4971 bfd_boolean newdef,
4972 bfd_boolean olddef,
4973 bfd *oldbfd,
4974 const asection *oldsec)
4975 {
4976 /* A normal common symbol and a large common symbol result in a
4977 normal common symbol. We turn the large common symbol into a
4978 normal one. */
4979 if (!olddef
4980 && h->root.type == bfd_link_hash_common
4981 && !newdef
4982 && bfd_is_com_section (*psec)
4983 && oldsec != *psec)
4984 {
4985 if (sym->st_shndx == SHN_COMMON
4986 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4987 {
4988 h->root.u.c.p->section
4989 = bfd_make_section_old_way (oldbfd, "COMMON");
4990 h->root.u.c.p->section->flags = SEC_ALLOC;
4991 }
4992 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4993 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4994 *psec = bfd_com_section_ptr;
4995 }
4996
4997 return TRUE;
4998 }
4999
5000 static int
5001 elf_x86_64_additional_program_headers (bfd *abfd,
5002 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5003 {
5004 asection *s;
5005 int count = 0;
5006
5007 /* Check to see if we need a large readonly segment. */
5008 s = bfd_get_section_by_name (abfd, ".lrodata");
5009 if (s && (s->flags & SEC_LOAD))
5010 count++;
5011
5012   /* Check to see if we need a large data segment.  Since the .lbss
5013      section is placed right after the .bss section, there should be no
5014      need for a large data segment just because of .lbss.  */
5015 s = bfd_get_section_by_name (abfd, ".ldata");
5016 if (s && (s->flags & SEC_LOAD))
5017 count++;
5018
5019 return count;
5020 }
5021
5022 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5023
5024 static bfd_boolean
5025 elf_x86_64_relocs_compatible (const bfd_target *input,
5026 const bfd_target *output)
5027 {
5028 return ((xvec_get_elf_backend_data (input)->s->elfclass
5029 == xvec_get_elf_backend_data (output)->s->elfclass)
5030 && _bfd_elf_relocs_compatible (input, output));
5031 }
5032
5033 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5034 with GNU properties if found. Otherwise, return NULL. */
5035
5036 static bfd *
5037 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5038 {
5039 struct elf_x86_init_table init_table;
5040
5041 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5042 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5043 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5044 != (int) R_X86_64_GNU_VTINHERIT)
5045 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5046 != (int) R_X86_64_GNU_VTENTRY))
5047 abort ();
5048
5049 /* This is unused for x86-64. */
5050 init_table.plt0_pad_byte = 0x90;
5051
5052 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5053 {
5054 const struct elf_backend_data *bed
5055 = get_elf_backend_data (info->output_bfd);
5056 struct elf_x86_link_hash_table *htab
5057 = elf_x86_hash_table (info, bed->target_id);
5058 if (!htab)
5059 abort ();
5060 if (htab->params->bndplt)
5061 {
5062 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5063 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5064 }
5065 else
5066 {
5067 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5068 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5069 }
5070
5071 if (ABI_64_P (info->output_bfd))
5072 {
5073 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5074 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5075 }
5076 else
5077 {
5078 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5079 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5080 }
5081 }
5082 else
5083 {
5084 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5085 init_table.non_lazy_plt = NULL;
5086 init_table.lazy_ibt_plt = NULL;
5087 init_table.non_lazy_ibt_plt = NULL;
5088 }
5089
5090 if (ABI_64_P (info->output_bfd))
5091 {
5092 init_table.r_info = elf64_r_info;
5093 init_table.r_sym = elf64_r_sym;
5094 }
5095 else
5096 {
5097 init_table.r_info = elf32_r_info;
5098 init_table.r_sym = elf32_r_sym;
5099 }
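  /* The ELF64 form packs the symbol index into the upper 32 bits of
     r_info ((sym << 32) | type), while the ELF32 form used for x32 packs
     it into the upper 24 bits ((sym << 8) | type); the helpers chosen
     above hide that difference from the rest of the backend.  */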
5100
5101 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5102 }
5103
5104 static const struct bfd_elf_special_section
5105 elf_x86_64_special_sections[]=
5106 {
5107 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5108 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5109 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5110 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5111 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5112 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5113 { NULL, 0, 0, 0, 0 }
5114 };
5115
5116 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5117 #define TARGET_LITTLE_NAME "elf64-x86-64"
5118 #define ELF_ARCH bfd_arch_i386
5119 #define ELF_TARGET_ID X86_64_ELF_DATA
5120 #define ELF_MACHINE_CODE EM_X86_64
5121 #if DEFAULT_LD_Z_SEPARATE_CODE
5122 # define ELF_MAXPAGESIZE 0x1000
5123 #else
5124 # define ELF_MAXPAGESIZE 0x200000
5125 #endif
5126 #define ELF_MINPAGESIZE 0x1000
5127 #define ELF_COMMONPAGESIZE 0x1000
5128
5129 #define elf_backend_can_gc_sections 1
5130 #define elf_backend_can_refcount 1
5131 #define elf_backend_want_got_plt 1
5132 #define elf_backend_plt_readonly 1
5133 #define elf_backend_want_plt_sym 0
5134 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5135 #define elf_backend_rela_normal 1
5136 #define elf_backend_plt_alignment 4
5137 #define elf_backend_extern_protected_data 1
5138 #define elf_backend_caches_rawsize 1
5139 #define elf_backend_dtrel_excludes_plt 1
5140 #define elf_backend_want_dynrelro 1
5141
5142 #define elf_info_to_howto elf_x86_64_info_to_howto
5143
5144 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5145 #define bfd_elf64_bfd_reloc_name_lookup \
5146 elf_x86_64_reloc_name_lookup
5147
5148 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5149 #define elf_backend_check_relocs elf_x86_64_check_relocs
5150 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5151 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5152 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5153 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5154 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5155 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5156 #ifdef CORE_HEADER
5157 #define elf_backend_write_core_note elf_x86_64_write_core_note
5158 #endif
5159 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5160 #define elf_backend_relocate_section elf_x86_64_relocate_section
5161 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5162 #define elf_backend_object_p elf64_x86_64_elf_object_p
5163 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5164
5165 #define elf_backend_section_from_shdr \
5166 elf_x86_64_section_from_shdr
5167
5168 #define elf_backend_section_from_bfd_section \
5169 elf_x86_64_elf_section_from_bfd_section
5170 #define elf_backend_add_symbol_hook \
5171 elf_x86_64_add_symbol_hook
5172 #define elf_backend_symbol_processing \
5173 elf_x86_64_symbol_processing
5174 #define elf_backend_common_section_index \
5175 elf_x86_64_common_section_index
5176 #define elf_backend_common_section \
5177 elf_x86_64_common_section
5178 #define elf_backend_common_definition \
5179 elf_x86_64_common_definition
5180 #define elf_backend_merge_symbol \
5181 elf_x86_64_merge_symbol
5182 #define elf_backend_special_sections \
5183 elf_x86_64_special_sections
5184 #define elf_backend_additional_program_headers \
5185 elf_x86_64_additional_program_headers
5186 #define elf_backend_setup_gnu_properties \
5187 elf_x86_64_link_setup_gnu_properties
5188 #define elf_backend_hide_symbol \
5189 _bfd_x86_elf_hide_symbol
5190
5191 #undef elf64_bed
5192 #define elf64_bed elf64_x86_64_bed
5193
5194 #include "elf64-target.h"
5195
5196 /* CloudABI support. */
5197
5198 #undef TARGET_LITTLE_SYM
5199 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5200 #undef TARGET_LITTLE_NAME
5201 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5202
5203 #undef ELF_OSABI
5204 #define ELF_OSABI ELFOSABI_CLOUDABI
5205
5206 #undef elf64_bed
5207 #define elf64_bed elf64_x86_64_cloudabi_bed
5208
5209 #include "elf64-target.h"
5210
5211 /* FreeBSD support. */
5212
5213 #undef TARGET_LITTLE_SYM
5214 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5215 #undef TARGET_LITTLE_NAME
5216 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5217
5218 #undef ELF_OSABI
5219 #define ELF_OSABI ELFOSABI_FREEBSD
5220
5221 #undef elf64_bed
5222 #define elf64_bed elf64_x86_64_fbsd_bed
5223
5224 #include "elf64-target.h"
5225
5226 /* Solaris 2 support. */
5227
5228 #undef TARGET_LITTLE_SYM
5229 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5230 #undef TARGET_LITTLE_NAME
5231 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5232
5233 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5234 {
5235 is_solaris /* os */
5236 };
5237
5238 #undef elf_backend_arch_data
5239 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5240
5241 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5242 objects won't be recognized. */
5243 #undef ELF_OSABI
5244
5245 #undef elf64_bed
5246 #define elf64_bed elf64_x86_64_sol2_bed
5247
5248 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5249 boundary. */
5250 #undef elf_backend_static_tls_alignment
5251 #define elf_backend_static_tls_alignment 16
5252
5253 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5254
5255 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5256 File, p.63. */
5257 #undef elf_backend_want_plt_sym
5258 #define elf_backend_want_plt_sym 1
5259
5260 #undef elf_backend_strtab_flags
5261 #define elf_backend_strtab_flags SHF_STRINGS
5262
5263 static bfd_boolean
5264 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5265 bfd *obfd ATTRIBUTE_UNUSED,
5266 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5267 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5268 {
5269 /* PR 19938: FIXME: Need to add code for setting the sh_info
5270 and sh_link fields of Solaris specific section types. */
5271 return FALSE;
5272 }
5273
5274 #undef elf_backend_copy_special_section_fields
5275 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5276
5277 #include "elf64-target.h"
5278
5279 /* Native Client support. */
5280
5281 static bfd_boolean
5282 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5283 {
5284 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5285 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5286 return TRUE;
5287 }
5288
5289 #undef TARGET_LITTLE_SYM
5290 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5291 #undef TARGET_LITTLE_NAME
5292 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5293 #undef elf64_bed
5294 #define elf64_bed elf64_x86_64_nacl_bed
5295
5296 #undef ELF_MAXPAGESIZE
5297 #undef ELF_MINPAGESIZE
5298 #undef ELF_COMMONPAGESIZE
5299 #define ELF_MAXPAGESIZE 0x10000
5300 #define ELF_MINPAGESIZE 0x10000
5301 #define ELF_COMMONPAGESIZE 0x10000
5302
5303 /* Restore defaults. */
5304 #undef ELF_OSABI
5305 #undef elf_backend_static_tls_alignment
5306 #undef elf_backend_want_plt_sym
5307 #define elf_backend_want_plt_sym 0
5308 #undef elf_backend_strtab_flags
5309 #undef elf_backend_copy_special_section_fields
5310
5311 /* NaCl uses substantially different PLT entries for the same effects. */
5312
5313 #undef elf_backend_plt_alignment
5314 #define elf_backend_plt_alignment 5
5315 #define NACL_PLT_ENTRY_SIZE 64
5316 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5317
5318 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5319 {
5320 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5321 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5322 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5323 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5324 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5325
5326 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5327 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5328
5329 /* 32 bytes of nop to pad out to the standard size. */
5330 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5331 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5332 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5333 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5334 0x66, /* excess data16 prefix */
5335 0x90 /* nop */
5336 };
5337
5338 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5339 {
5340 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5341 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5342 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5343 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5344
5345 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5346 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5347 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5348
5349 /* Lazy GOT entries point here (32-byte aligned). */
5350 0x68, /* pushq immediate */
5351 0, 0, 0, 0, /* replaced with index into relocation table. */
5352 0xe9, /* jmp relative */
5353 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5354
5355 /* 22 bytes of nop to pad out to the standard size. */
5356 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5357 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5358 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5359 };
5360
5361 /* .eh_frame covering the .plt section. */
5362
5363 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5364 {
5365 #if (PLT_CIE_LENGTH != 20 \
5366 || PLT_FDE_LENGTH != 36 \
5367 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5368 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5369 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5370 #endif
5371 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5372 0, 0, 0, 0, /* CIE ID */
5373 1, /* CIE version */
5374 'z', 'R', 0, /* Augmentation string */
5375 1, /* Code alignment factor */
5376 0x78, /* Data alignment factor */
5377 16, /* Return address column */
5378 1, /* Augmentation size */
5379 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5380 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5381 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5382 DW_CFA_nop, DW_CFA_nop,
5383
5384 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5385 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5386 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5387 0, 0, 0, 0, /* .plt size goes here */
5388 0, /* Augmentation size */
5389 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5390 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5391 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5392 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5393 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5394 13, /* Block length */
5395 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5396 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5397 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5398 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5399 DW_CFA_nop, DW_CFA_nop
5400 };
5401
5402 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5403 {
5404 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5405 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5406 elf_x86_64_nacl_plt_entry, /* plt_entry */
5407 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5408 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5409 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5410 2, /* plt_tlsdesc_got1_offset */
5411 9, /* plt_tlsdesc_got2_offset */
5412 6, /* plt_tlsdesc_got1_insn_end */
5413 13, /* plt_tlsdesc_got2_insn_end */
5414 2, /* plt0_got1_offset */
5415 9, /* plt0_got2_offset */
5416 13, /* plt0_got2_insn_end */
5417 3, /* plt_got_offset */
5418 33, /* plt_reloc_offset */
5419 38, /* plt_plt_offset */
5420 7, /* plt_got_insn_size */
5421 42, /* plt_plt_insn_end */
5422 32, /* plt_lazy_offset */
5423 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5424 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5425 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5426 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5427 };
5428
5429 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5430 {
5431 is_nacl /* os */
5432 };
5433
5434 #undef elf_backend_arch_data
5435 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5436
5437 #undef elf_backend_object_p
5438 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5439 #undef elf_backend_modify_segment_map
5440 #define elf_backend_modify_segment_map nacl_modify_segment_map
5441 #undef elf_backend_modify_program_headers
5442 #define elf_backend_modify_program_headers nacl_modify_program_headers
5443 #undef elf_backend_final_write_processing
5444 #define elf_backend_final_write_processing nacl_final_write_processing
5445
#include "elf64-target.h"

/* Native Client x32 support. */

static bfd_boolean
elf32_x86_64_nacl_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a NaCl x86-64 ELF32 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
#undef elf32_bed
#define elf32_bed elf32_x86_64_nacl_bed

#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_nacl_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#undef elf32_bed
#define elf32_bed elf32_x86_64_bed

#include "elf32-target.h"

/* Restore defaults. */
#undef elf_backend_object_p
#define elf_backend_object_p elf64_x86_64_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#undef elf_backend_size_info
#undef elf_backend_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing

/* Intel L1OM support. */

static bfd_boolean
elf64_l1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an L1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_l1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_L1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_l1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_l1om_elf_object_p

/* Restore defaults. */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
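/* With ld defaulting to -z separate-code, a 4K maximum page size keeps
   the padding between the separated code and data segments small;
   otherwise keep the traditional 2M x86-64 maximum page size.  */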
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE 0x1000
#else
# define ELF_MAXPAGESIZE 0x200000
#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_arch_bed

#include "elf64-target.h"

/* FreeBSD L1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_l1om_fbsd_bed

#include "elf64-target.h"

/* Intel K1OM support. */

static bfd_boolean
elf64_k1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a K1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_k1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_K1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_k1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_k1om_elf_object_p

#undef elf_backend_static_tls_alignment

#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0

#include "elf64-target.h"

/* FreeBSD K1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_k1om_fbsd_bed

#include "elf64-target.h"

/* 32-bit x86-64 support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64"
#undef elf32_bed

#undef ELF_ARCH
#define ELF_ARCH bfd_arch_i386

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_X86_64

#undef ELF_OSABI

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

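/* Emit the plain (non-NaCl) 32-bit x86-64 (x32) target vector,
   elf32-x86-64, from the macros above.  */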
#include "elf32-target.h"