bfd_size_type to size_t
[deliverable/binutils-gdb.git] / bfd / elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2020 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(istruct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(istruct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefx in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is the
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to dynamic linker. Subsequent
597 entries for a IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazey procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazey procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT-enabled in the non-lazey procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT-enabled in the x32 non-lazey procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence support transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transit to different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transit to different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq $r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq $rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transit to different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq $r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq $rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1227 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1228
1229 Make sure it's a leaq adding rip to a 32-bit offset
1230 into any register, although it's probably almost always
1231 going to be rax. */
1232
1233 if (offset < 3 || (offset + 4) > sec->size)
1234 return FALSE;
1235
1236 val = bfd_get_8 (abfd, contents + offset - 3);
1237 val &= 0xfb;
1238 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1239 return FALSE;
1240
1241 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1242 return FALSE;
1243
1244 val = bfd_get_8 (abfd, contents + offset - 1);
1245 return (val & 0xc7) == 0x05;
1246
1247 case R_X86_64_TLSDESC_CALL:
1248 /* Check transition from GDesc access model:
1249 call *x@tlsdesc(%rax) <--- LP64 mode.
1250 call *x@tlsdesc(%eax) <--- X32 mode.
1251 */
1252 if (offset + 2 <= sec->size)
1253 {
1254 unsigned int prefix;
1255 call = contents + offset;
1256 prefix = 0;
1257 if (!ABI_64_P (abfd))
1258 {
1259 /* Check for call *x@tlsdesc(%eax). */
1260 if (call[0] == 0x67)
1261 {
1262 prefix = 1;
1263 if (offset + 3 > sec->size)
1264 return FALSE;
1265 }
1266 }
1267 /* Make sure that it's a call *x@tlsdesc(%rax). */
1268 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1269 }
1270
1271 return FALSE;
1272
1273 default:
1274 abort ();
1275 }
1276 }
1277
1278 /* Return TRUE if the TLS access transition is OK or no transition
1279 will be performed. Update R_TYPE if there is a transition. */
1280
1281 static bfd_boolean
1282 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1283 asection *sec, bfd_byte *contents,
1284 Elf_Internal_Shdr *symtab_hdr,
1285 struct elf_link_hash_entry **sym_hashes,
1286 unsigned int *r_type, int tls_type,
1287 const Elf_Internal_Rela *rel,
1288 const Elf_Internal_Rela *relend,
1289 struct elf_link_hash_entry *h,
1290 unsigned long r_symndx,
1291 bfd_boolean from_relocate_section)
1292 {
1293 unsigned int from_type = *r_type;
1294 unsigned int to_type = from_type;
1295 bfd_boolean check = TRUE;
1296
1297 /* Skip TLS transition for functions. */
1298 if (h != NULL
1299 && (h->type == STT_FUNC
1300 || h->type == STT_GNU_IFUNC))
1301 return TRUE;
1302
1303 switch (from_type)
1304 {
1305 case R_X86_64_TLSGD:
1306 case R_X86_64_GOTPC32_TLSDESC:
1307 case R_X86_64_TLSDESC_CALL:
1308 case R_X86_64_GOTTPOFF:
1309 if (bfd_link_executable (info))
1310 {
1311 if (h == NULL)
1312 to_type = R_X86_64_TPOFF32;
1313 else
1314 to_type = R_X86_64_GOTTPOFF;
1315 }
1316
1317 /* When we are called from elf_x86_64_relocate_section, there may
1318 be additional transitions based on TLS_TYPE. */
1319 if (from_relocate_section)
1320 {
1321 unsigned int new_to_type = to_type;
1322
1323 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1324 new_to_type = R_X86_64_TPOFF32;
1325
1326 if (to_type == R_X86_64_TLSGD
1327 || to_type == R_X86_64_GOTPC32_TLSDESC
1328 || to_type == R_X86_64_TLSDESC_CALL)
1329 {
1330 if (tls_type == GOT_TLS_IE)
1331 new_to_type = R_X86_64_GOTTPOFF;
1332 }
1333
1334 /* We checked the transition before when we were called from
1335 elf_x86_64_check_relocs. We only want to check the new
1336 transition which hasn't been checked before. */
1337 check = new_to_type != to_type && from_type == to_type;
1338 to_type = new_to_type;
1339 }
1340
1341 break;
1342
1343 case R_X86_64_TLSLD:
1344 if (bfd_link_executable (info))
1345 to_type = R_X86_64_TPOFF32;
1346 break;
1347
1348 default:
1349 return TRUE;
1350 }
1351
1352 /* Return TRUE if there is no transition. */
1353 if (from_type == to_type)
1354 return TRUE;
1355
1356 /* Check if the transition can be performed. */
1357 if (check
1358 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1359 symtab_hdr, sym_hashes,
1360 from_type, rel, relend))
1361 {
1362 reloc_howto_type *from, *to;
1363 const char *name;
1364
1365 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1366 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1367
1368 if (from == NULL || to == NULL)
1369 return FALSE;
1370
1371 if (h)
1372 name = h->root.root.string;
1373 else
1374 {
1375 struct elf_x86_link_hash_table *htab;
1376
1377 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1378 if (htab == NULL)
1379 name = "*unknown*";
1380 else
1381 {
1382 Elf_Internal_Sym *isym;
1383
1384 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1385 abfd, r_symndx);
1386 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1387 }
1388 }
1389
1390 _bfd_error_handler
1391 /* xgettext:c-format */
1392 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1393 " in section `%pA' failed"),
1394 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1395 bfd_set_error (bfd_error_bad_value);
1396 return FALSE;
1397 }
1398
1399 *r_type = to_type;
1400 return TRUE;
1401 }
1402
1403 /* Rename some of the generic section flags to better document how they
1404 are used here. */
1405 #define check_relocs_failed sec_flg0
1406
1407 static bfd_boolean
1408 elf_x86_64_need_pic (struct bfd_link_info *info,
1409 bfd *input_bfd, asection *sec,
1410 struct elf_link_hash_entry *h,
1411 Elf_Internal_Shdr *symtab_hdr,
1412 Elf_Internal_Sym *isym,
1413 reloc_howto_type *howto)
1414 {
1415 const char *v = "";
1416 const char *und = "";
1417 const char *pic = "";
1418 const char *object;
1419
1420 const char *name;
1421 if (h)
1422 {
1423 name = h->root.root.string;
1424 switch (ELF_ST_VISIBILITY (h->other))
1425 {
1426 case STV_HIDDEN:
1427 v = _("hidden symbol ");
1428 break;
1429 case STV_INTERNAL:
1430 v = _("internal symbol ");
1431 break;
1432 case STV_PROTECTED:
1433 v = _("protected symbol ");
1434 break;
1435 default:
1436 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1437 v = _("protected symbol ");
1438 else
1439 v = _("symbol ");
1440 pic = NULL;
1441 break;
1442 }
1443
1444 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1445 und = _("undefined ");
1446 }
1447 else
1448 {
1449 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1450 pic = NULL;
1451 }
1452
1453 if (bfd_link_dll (info))
1454 {
1455 object = _("a shared object");
1456 if (!pic)
1457 pic = _("; recompile with -fPIC");
1458 }
1459 else
1460 {
1461 if (bfd_link_pie (info))
1462 object = _("a PIE object");
1463 else
1464 object = _("a PDE object");
1465 if (!pic)
1466 pic = _("; recompile with -fPIE");
1467 }
1468
1469 /* xgettext:c-format */
1470 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1471 "not be used when making %s%s"),
1472 input_bfd, howto->name, und, v, name,
1473 object, pic);
1474 bfd_set_error (bfd_error_bad_value);
1475 sec->check_relocs_failed = 1;
1476 return FALSE;
1477 }
1478
1479 /* With the local symbol, foo, we convert
1480 mov foo@GOTPCREL(%rip), %reg
1481 to
1482 lea foo(%rip), %reg
1483 and convert
1484 call/jmp *foo@GOTPCREL(%rip)
1485 to
1486 nop call foo/jmp foo nop
1487 When PIC is false, convert
1488 test %reg, foo@GOTPCREL(%rip)
1489 to
1490 test $foo, %reg
1491 and convert
1492 binop foo@GOTPCREL(%rip), %reg
1493 to
1494 binop $foo, %reg
1495 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1496 instructions. */
1497
1498 static bfd_boolean
1499 elf_x86_64_convert_load_reloc (bfd *abfd,
1500 bfd_byte *contents,
1501 unsigned int *r_type_p,
1502 Elf_Internal_Rela *irel,
1503 struct elf_link_hash_entry *h,
1504 bfd_boolean *converted,
1505 struct bfd_link_info *link_info)
1506 {
1507 struct elf_x86_link_hash_table *htab;
1508 bfd_boolean is_pic;
1509 bfd_boolean no_overflow;
1510 bfd_boolean relocx;
1511 bfd_boolean to_reloc_pc32;
1512 asection *tsec;
1513 bfd_signed_vma raddend;
1514 unsigned int opcode;
1515 unsigned int modrm;
1516 unsigned int r_type = *r_type_p;
1517 unsigned int r_symndx;
1518 bfd_vma roff = irel->r_offset;
1519
1520 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1521 return TRUE;
1522
1523 raddend = irel->r_addend;
1524 /* Addend for 32-bit PC-relative relocation must be -4. */
1525 if (raddend != -4)
1526 return TRUE;
1527
1528 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1529 is_pic = bfd_link_pic (link_info);
1530
1531 relocx = (r_type == R_X86_64_GOTPCRELX
1532 || r_type == R_X86_64_REX_GOTPCRELX);
1533
1534 /* TRUE if --no-relax is used. */
1535 no_overflow = link_info->disable_target_specific_optimizations > 1;
1536
1537 r_symndx = htab->r_sym (irel->r_info);
1538
1539 opcode = bfd_get_8 (abfd, contents + roff - 2);
1540
1541 /* Convert mov to lea since it has been done for a while. */
1542 if (opcode != 0x8b)
1543 {
1544 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1545 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1546 test, xor instructions. */
1547 if (!relocx)
1548 return TRUE;
1549 }
1550
1551 /* We convert only to R_X86_64_PC32:
1552 1. Branch.
1553 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1554 3. no_overflow is true.
1555 4. PIC.
1556 */
1557 to_reloc_pc32 = (opcode == 0xff
1558 || !relocx
1559 || no_overflow
1560 || is_pic);
1561
1562 /* Get the symbol referred to by the reloc. */
1563 if (h == NULL)
1564 {
1565 Elf_Internal_Sym *isym
1566 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1567
1568 /* Skip relocation against undefined symbols. */
1569 if (isym->st_shndx == SHN_UNDEF)
1570 return TRUE;
1571
1572 if (isym->st_shndx == SHN_ABS)
1573 tsec = bfd_abs_section_ptr;
1574 else if (isym->st_shndx == SHN_COMMON)
1575 tsec = bfd_com_section_ptr;
1576 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1577 tsec = &_bfd_elf_large_com_section;
1578 else
1579 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1580 }
1581 else
1582 {
1583 /* Undefined weak symbol is only bound locally in executable
1584 and its reference is resolved as 0 without relocation
1585 overflow. We can only perform this optimization for
1586 GOTPCRELX relocations since we need to modify REX byte.
1587 It is OK convert mov with R_X86_64_GOTPCREL to
1588 R_X86_64_PC32. */
1589 bfd_boolean local_ref;
1590 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1591
1592 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1593 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1594 if ((relocx || opcode == 0x8b)
1595 && (h->root.type == bfd_link_hash_undefweak
1596 && !eh->linker_def
1597 && local_ref))
1598 {
1599 if (opcode == 0xff)
1600 {
1601 /* Skip for branch instructions since R_X86_64_PC32
1602 may overflow. */
1603 if (no_overflow)
1604 return TRUE;
1605 }
1606 else if (relocx)
1607 {
1608 /* For non-branch instructions, we can convert to
1609 R_X86_64_32/R_X86_64_32S since we know if there
1610 is a REX byte. */
1611 to_reloc_pc32 = FALSE;
1612 }
1613
1614 /* Since we don't know the current PC when PIC is true,
1615 we can't convert to R_X86_64_PC32. */
1616 if (to_reloc_pc32 && is_pic)
1617 return TRUE;
1618
1619 goto convert;
1620 }
1621 /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
1622 ld.so may use its link-time address. */
1623 else if (h->start_stop
1624 || eh->linker_def
1625 || ((h->def_regular
1626 || h->root.type == bfd_link_hash_defined
1627 || h->root.type == bfd_link_hash_defweak)
1628 && h != htab->elf.hdynamic
1629 && local_ref))
1630 {
1631 /* bfd_link_hash_new or bfd_link_hash_undefined is
1632 set by an assignment in a linker script in
1633 bfd_elf_record_link_assignment. start_stop is set
1634 on __start_SECNAME/__stop_SECNAME which mark section
1635 SECNAME. */
1636 if (h->start_stop
1637 || eh->linker_def
1638 || (h->def_regular
1639 && (h->root.type == bfd_link_hash_new
1640 || h->root.type == bfd_link_hash_undefined
1641 || ((h->root.type == bfd_link_hash_defined
1642 || h->root.type == bfd_link_hash_defweak)
1643 && h->root.u.def.section == bfd_und_section_ptr))))
1644 {
1645 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1646 if (no_overflow)
1647 return TRUE;
1648 goto convert;
1649 }
1650 tsec = h->root.u.def.section;
1651 }
1652 else
1653 return TRUE;
1654 }
1655
1656 /* Don't convert GOTPCREL relocation against large section. */
1657 if (elf_section_data (tsec) != NULL
1658 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1659 return TRUE;
1660
1661 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1662 if (no_overflow)
1663 return TRUE;
1664
1665 convert:
1666 if (opcode == 0xff)
1667 {
1668 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1669 unsigned int nop;
1670 unsigned int disp;
1671 bfd_vma nop_offset;
1672
1673 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1674 R_X86_64_PC32. */
1675 modrm = bfd_get_8 (abfd, contents + roff - 1);
1676 if (modrm == 0x25)
1677 {
1678 /* Convert to "jmp foo nop". */
1679 modrm = 0xe9;
1680 nop = NOP_OPCODE;
1681 nop_offset = irel->r_offset + 3;
1682 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1683 irel->r_offset -= 1;
1684 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1685 }
1686 else
1687 {
1688 struct elf_x86_link_hash_entry *eh
1689 = (struct elf_x86_link_hash_entry *) h;
1690
1691 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1692 is a nop prefix. */
1693 modrm = 0xe8;
1694 /* To support TLS optimization, always use addr32 prefix for
1695 "call *__tls_get_addr@GOTPCREL(%rip)". */
1696 if (eh && eh->tls_get_addr)
1697 {
1698 nop = 0x67;
1699 nop_offset = irel->r_offset - 2;
1700 }
1701 else
1702 {
1703 nop = htab->params->call_nop_byte;
1704 if (htab->params->call_nop_as_suffix)
1705 {
1706 nop_offset = irel->r_offset + 3;
1707 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1708 irel->r_offset -= 1;
1709 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1710 }
1711 else
1712 nop_offset = irel->r_offset - 2;
1713 }
1714 }
1715 bfd_put_8 (abfd, nop, contents + nop_offset);
1716 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1717 r_type = R_X86_64_PC32;
1718 }
1719 else
1720 {
1721 unsigned int rex;
1722 unsigned int rex_mask = REX_R;
1723
1724 if (r_type == R_X86_64_REX_GOTPCRELX)
1725 rex = bfd_get_8 (abfd, contents + roff - 3);
1726 else
1727 rex = 0;
1728
1729 if (opcode == 0x8b)
1730 {
1731 if (to_reloc_pc32)
1732 {
1733 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1734 "lea foo(%rip), %reg". */
1735 opcode = 0x8d;
1736 r_type = R_X86_64_PC32;
1737 }
1738 else
1739 {
1740 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1741 "mov $foo, %reg". */
1742 opcode = 0xc7;
1743 modrm = bfd_get_8 (abfd, contents + roff - 1);
1744 modrm = 0xc0 | (modrm & 0x38) >> 3;
1745 if ((rex & REX_W) != 0
1746 && ABI_64_P (link_info->output_bfd))
1747 {
1748 /* Keep the REX_W bit in REX byte for LP64. */
1749 r_type = R_X86_64_32S;
1750 goto rewrite_modrm_rex;
1751 }
1752 else
1753 {
1754 /* If the REX_W bit in REX byte isn't needed,
1755 use R_X86_64_32 and clear the W bit to avoid
1756 sign-extend imm32 to imm64. */
1757 r_type = R_X86_64_32;
1758 /* Clear the W bit in REX byte. */
1759 rex_mask |= REX_W;
1760 goto rewrite_modrm_rex;
1761 }
1762 }
1763 }
1764 else
1765 {
1766 /* R_X86_64_PC32 isn't supported. */
1767 if (to_reloc_pc32)
1768 return TRUE;
1769
1770 modrm = bfd_get_8 (abfd, contents + roff - 1);
1771 if (opcode == 0x85)
1772 {
1773 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1774 "test $foo, %reg". */
1775 modrm = 0xc0 | (modrm & 0x38) >> 3;
1776 opcode = 0xf7;
1777 }
1778 else
1779 {
1780 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1781 "binop $foo, %reg". */
1782 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1783 opcode = 0x81;
1784 }
1785
1786 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1787 overflow when sign-extending imm32 to imm64. */
1788 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1789
1790 rewrite_modrm_rex:
1791 bfd_put_8 (abfd, modrm, contents + roff - 1);
1792
1793 if (rex)
1794 {
1795 /* Move the R bit to the B bit in REX byte. */
1796 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1797 bfd_put_8 (abfd, rex, contents + roff - 3);
1798 }
1799
1800 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1801 irel->r_addend = 0;
1802 }
1803
1804 bfd_put_8 (abfd, opcode, contents + roff - 2);
1805 }
1806
1807 *r_type_p = r_type;
1808 irel->r_info = htab->r_info (r_symndx,
1809 r_type | R_X86_64_converted_reloc_bit);
1810
1811 *converted = TRUE;
1812
1813 return TRUE;
1814 }
1815
1816 /* Look through the relocs for a section during the first phase, and
1817 calculate needed space in the global offset table, procedure
1818 linkage table, and dynamic reloc sections. */
1819
1820 static bfd_boolean
1821 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1822 asection *sec,
1823 const Elf_Internal_Rela *relocs)
1824 {
1825 struct elf_x86_link_hash_table *htab;
1826 Elf_Internal_Shdr *symtab_hdr;
1827 struct elf_link_hash_entry **sym_hashes;
1828 const Elf_Internal_Rela *rel;
1829 const Elf_Internal_Rela *rel_end;
1830 asection *sreloc;
1831 bfd_byte *contents;
1832 bfd_boolean converted;
1833
1834 if (bfd_link_relocatable (info))
1835 return TRUE;
1836
1837 /* Don't do anything special with non-loaded, non-alloced sections.
1838 In particular, any relocs in such sections should not affect GOT
1839 and PLT reference counting (ie. we don't allow them to create GOT
1840 or PLT entries), there's no possibility or desire to optimize TLS
1841 relocs, and there's not much point in propagating relocs to shared
1842 libs that the dynamic linker won't relocate. */
1843 if ((sec->flags & SEC_ALLOC) == 0)
1844 return TRUE;
1845
1846 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1847 if (htab == NULL)
1848 {
1849 sec->check_relocs_failed = 1;
1850 return FALSE;
1851 }
1852
1853 BFD_ASSERT (is_x86_elf (abfd, htab));
1854
1855 /* Get the section contents. */
1856 if (elf_section_data (sec)->this_hdr.contents != NULL)
1857 contents = elf_section_data (sec)->this_hdr.contents;
1858 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1859 {
1860 sec->check_relocs_failed = 1;
1861 return FALSE;
1862 }
1863
1864 symtab_hdr = &elf_symtab_hdr (abfd);
1865 sym_hashes = elf_sym_hashes (abfd);
1866
1867 converted = FALSE;
1868
1869 sreloc = NULL;
1870
1871 rel_end = relocs + sec->reloc_count;
1872 for (rel = relocs; rel < rel_end; rel++)
1873 {
1874 unsigned int r_type;
1875 unsigned int r_symndx;
1876 struct elf_link_hash_entry *h;
1877 struct elf_x86_link_hash_entry *eh;
1878 Elf_Internal_Sym *isym;
1879 const char *name;
1880 bfd_boolean size_reloc;
1881 bfd_boolean converted_reloc;
1882
1883 r_symndx = htab->r_sym (rel->r_info);
1884 r_type = ELF32_R_TYPE (rel->r_info);
1885
1886 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1887 {
1888 /* xgettext:c-format */
1889 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1890 abfd, r_symndx);
1891 goto error_return;
1892 }
1893
1894 if (r_symndx < symtab_hdr->sh_info)
1895 {
1896 /* A local symbol. */
1897 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1898 abfd, r_symndx);
1899 if (isym == NULL)
1900 goto error_return;
1901
1902 /* Check relocation against local STT_GNU_IFUNC symbol. */
1903 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1904 {
1905 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1906 TRUE);
1907 if (h == NULL)
1908 goto error_return;
1909
1910 /* Fake a STT_GNU_IFUNC symbol. */
1911 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1912 isym, NULL);
1913 h->type = STT_GNU_IFUNC;
1914 h->def_regular = 1;
1915 h->ref_regular = 1;
1916 h->forced_local = 1;
1917 h->root.type = bfd_link_hash_defined;
1918 }
1919 else
1920 h = NULL;
1921 }
1922 else
1923 {
1924 isym = NULL;
1925 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1926 while (h->root.type == bfd_link_hash_indirect
1927 || h->root.type == bfd_link_hash_warning)
1928 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1929 }
1930
1931 /* Check invalid x32 relocations. */
1932 if (!ABI_64_P (abfd))
1933 switch (r_type)
1934 {
1935 default:
1936 break;
1937
1938 case R_X86_64_DTPOFF64:
1939 case R_X86_64_TPOFF64:
1940 case R_X86_64_PC64:
1941 case R_X86_64_GOTOFF64:
1942 case R_X86_64_GOT64:
1943 case R_X86_64_GOTPCREL64:
1944 case R_X86_64_GOTPC64:
1945 case R_X86_64_GOTPLT64:
1946 case R_X86_64_PLTOFF64:
1947 {
1948 if (h)
1949 name = h->root.root.string;
1950 else
1951 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1952 NULL);
1953 _bfd_error_handler
1954 /* xgettext:c-format */
1955 (_("%pB: relocation %s against symbol `%s' isn't "
1956 "supported in x32 mode"), abfd,
1957 x86_64_elf_howto_table[r_type].name, name);
1958 bfd_set_error (bfd_error_bad_value);
1959 goto error_return;
1960 }
1961 break;
1962 }
1963
1964 if (h != NULL)
1965 {
1966 /* It is referenced by a non-shared object. */
1967 h->ref_regular = 1;
1968 }
1969
1970 converted_reloc = FALSE;
1971 if ((r_type == R_X86_64_GOTPCREL
1972 || r_type == R_X86_64_GOTPCRELX
1973 || r_type == R_X86_64_REX_GOTPCRELX)
1974 && (h == NULL || h->type != STT_GNU_IFUNC))
1975 {
1976 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1977 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1978 irel, h, &converted_reloc,
1979 info))
1980 goto error_return;
1981
1982 if (converted_reloc)
1983 converted = TRUE;
1984 }
1985
1986 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1987 symtab_hdr, sym_hashes,
1988 &r_type, GOT_UNKNOWN,
1989 rel, rel_end, h, r_symndx, FALSE))
1990 goto error_return;
1991
1992 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1993 if (h == htab->elf.hgot)
1994 htab->got_referenced = TRUE;
1995
1996 eh = (struct elf_x86_link_hash_entry *) h;
1997 switch (r_type)
1998 {
1999 case R_X86_64_TLSLD:
2000 htab->tls_ld_or_ldm_got.refcount = 1;
2001 goto create_got;
2002
2003 case R_X86_64_TPOFF32:
2004 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2005 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2006 &x86_64_elf_howto_table[r_type]);
2007 if (eh != NULL)
2008 eh->zero_undefweak &= 0x2;
2009 break;
2010
2011 case R_X86_64_GOTTPOFF:
2012 if (!bfd_link_executable (info))
2013 info->flags |= DF_STATIC_TLS;
2014 /* Fall through */
2015
2016 case R_X86_64_GOT32:
2017 case R_X86_64_GOTPCREL:
2018 case R_X86_64_GOTPCRELX:
2019 case R_X86_64_REX_GOTPCRELX:
2020 case R_X86_64_TLSGD:
2021 case R_X86_64_GOT64:
2022 case R_X86_64_GOTPCREL64:
2023 case R_X86_64_GOTPLT64:
2024 case R_X86_64_GOTPC32_TLSDESC:
2025 case R_X86_64_TLSDESC_CALL:
2026 /* This symbol requires a global offset table entry. */
2027 {
2028 int tls_type, old_tls_type;
2029
2030 switch (r_type)
2031 {
2032 default: tls_type = GOT_NORMAL; break;
2033 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2034 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2035 case R_X86_64_GOTPC32_TLSDESC:
2036 case R_X86_64_TLSDESC_CALL:
2037 tls_type = GOT_TLS_GDESC; break;
2038 }
2039
2040 if (h != NULL)
2041 {
2042 h->got.refcount = 1;
2043 old_tls_type = eh->tls_type;
2044 }
2045 else
2046 {
2047 bfd_signed_vma *local_got_refcounts;
2048
2049 /* This is a global offset table entry for a local symbol. */
2050 local_got_refcounts = elf_local_got_refcounts (abfd);
2051 if (local_got_refcounts == NULL)
2052 {
2053 bfd_size_type size;
2054
2055 size = symtab_hdr->sh_info;
2056 size *= sizeof (bfd_signed_vma)
2057 + sizeof (bfd_vma) + sizeof (char);
2058 local_got_refcounts = ((bfd_signed_vma *)
2059 bfd_zalloc (abfd, size));
2060 if (local_got_refcounts == NULL)
2061 goto error_return;
2062 elf_local_got_refcounts (abfd) = local_got_refcounts;
2063 elf_x86_local_tlsdesc_gotent (abfd)
2064 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2065 elf_x86_local_got_tls_type (abfd)
2066 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2067 }
2068 local_got_refcounts[r_symndx] = 1;
2069 old_tls_type
2070 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2071 }
2072
2073 /* If a TLS symbol is accessed using IE at least once,
2074 there is no point to use dynamic model for it. */
2075 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2076 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2077 || tls_type != GOT_TLS_IE))
2078 {
2079 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2080 tls_type = old_tls_type;
2081 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2082 && GOT_TLS_GD_ANY_P (tls_type))
2083 tls_type |= old_tls_type;
2084 else
2085 {
2086 if (h)
2087 name = h->root.root.string;
2088 else
2089 name = bfd_elf_sym_name (abfd, symtab_hdr,
2090 isym, NULL);
2091 _bfd_error_handler
2092 /* xgettext:c-format */
2093 (_("%pB: '%s' accessed both as normal and"
2094 " thread local symbol"),
2095 abfd, name);
2096 bfd_set_error (bfd_error_bad_value);
2097 goto error_return;
2098 }
2099 }
2100
2101 if (old_tls_type != tls_type)
2102 {
2103 if (eh != NULL)
2104 eh->tls_type = tls_type;
2105 else
2106 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2107 }
2108 }
2109 /* Fall through */
2110
2111 case R_X86_64_GOTOFF64:
2112 case R_X86_64_GOTPC32:
2113 case R_X86_64_GOTPC64:
2114 create_got:
2115 if (eh != NULL)
2116 eh->zero_undefweak &= 0x2;
2117 break;
2118
2119 case R_X86_64_PLT32:
2120 case R_X86_64_PLT32_BND:
2121 /* This symbol requires a procedure linkage table entry. We
2122 actually build the entry in adjust_dynamic_symbol,
2123 because this might be a case of linking PIC code which is
2124 never referenced by a dynamic object, in which case we
2125 don't need to generate a procedure linkage table entry
2126 after all. */
2127
2128 /* If this is a local symbol, we resolve it directly without
2129 creating a procedure linkage table entry. */
2130 if (h == NULL)
2131 continue;
2132
2133 eh->zero_undefweak &= 0x2;
2134 h->needs_plt = 1;
2135 h->plt.refcount = 1;
2136 break;
2137
2138 case R_X86_64_PLTOFF64:
2139 /* This tries to form the 'address' of a function relative
2140 to GOT. For global symbols we need a PLT entry. */
2141 if (h != NULL)
2142 {
2143 h->needs_plt = 1;
2144 h->plt.refcount = 1;
2145 }
2146 goto create_got;
2147
2148 case R_X86_64_SIZE32:
2149 case R_X86_64_SIZE64:
2150 size_reloc = TRUE;
2151 goto do_size;
2152
2153 case R_X86_64_32:
2154 if (!ABI_64_P (abfd))
2155 goto pointer;
2156 /* Fall through. */
2157 case R_X86_64_8:
2158 case R_X86_64_16:
2159 case R_X86_64_32S:
2160 /* Check relocation overflow as these relocs may lead to
2161 run-time relocation overflow. Don't error out for
2162 sections we don't care about, such as debug sections or
2163 when relocation overflow check is disabled. */
2164 if (!htab->params->no_reloc_overflow_check
2165 && !converted_reloc
2166 && (bfd_link_pic (info)
2167 || (bfd_link_executable (info)
2168 && h != NULL
2169 && !h->def_regular
2170 && h->def_dynamic
2171 && (sec->flags & SEC_READONLY) == 0)))
2172 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2173 &x86_64_elf_howto_table[r_type]);
2174 /* Fall through. */
2175
2176 case R_X86_64_PC8:
2177 case R_X86_64_PC16:
2178 case R_X86_64_PC32:
2179 case R_X86_64_PC32_BND:
2180 case R_X86_64_PC64:
2181 case R_X86_64_64:
2182 pointer:
2183 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2184 eh->zero_undefweak |= 0x2;
2185 /* We are called after all symbols have been resolved. Only
2186 relocation against STT_GNU_IFUNC symbol must go through
2187 PLT. */
2188 if (h != NULL
2189 && (bfd_link_executable (info)
2190 || h->type == STT_GNU_IFUNC))
2191 {
2192 bfd_boolean func_pointer_ref = FALSE;
2193
2194 if (r_type == R_X86_64_PC32)
2195 {
2196 /* Since something like ".long foo - ." may be used
2197 as pointer, make sure that PLT is used if foo is
2198 a function defined in a shared library. */
2199 if ((sec->flags & SEC_CODE) == 0)
2200 {
2201 h->pointer_equality_needed = 1;
2202 if (bfd_link_pie (info)
2203 && h->type == STT_FUNC
2204 && !h->def_regular
2205 && h->def_dynamic)
2206 {
2207 h->needs_plt = 1;
2208 h->plt.refcount = 1;
2209 }
2210 }
2211 }
2212 else if (r_type != R_X86_64_PC32_BND
2213 && r_type != R_X86_64_PC64)
2214 {
2215 h->pointer_equality_needed = 1;
2216 /* At run-time, R_X86_64_64 can be resolved for both
2217 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2218 can only be resolved for x32. */
2219 if ((sec->flags & SEC_READONLY) == 0
2220 && (r_type == R_X86_64_64
2221 || (!ABI_64_P (abfd)
2222 && (r_type == R_X86_64_32
2223 || r_type == R_X86_64_32S))))
2224 func_pointer_ref = TRUE;
2225 }
2226
2227 if (!func_pointer_ref)
2228 {
2229 /* If this reloc is in a read-only section, we might
2230 need a copy reloc. We can't check reliably at this
2231 stage whether the section is read-only, as input
2232 sections have not yet been mapped to output sections.
2233 Tentatively set the flag for now, and correct in
2234 adjust_dynamic_symbol. */
2235 h->non_got_ref = 1;
2236
2237 /* We may need a .plt entry if the symbol is a function
2238 defined in a shared lib or is a function referenced
2239 from the code or read-only section. */
2240 if (!h->def_regular
2241 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2242 h->plt.refcount = 1;
2243 }
2244 }
2245
2246 size_reloc = FALSE;
2247 do_size:
2248 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2249 htab->pointer_r_type))
2250 {
2251 struct elf_dyn_relocs *p;
2252 struct elf_dyn_relocs **head;
2253
2254 /* We must copy these reloc types into the output file.
2255 Create a reloc section in dynobj and make room for
2256 this reloc. */
2257 if (sreloc == NULL)
2258 {
2259 sreloc = _bfd_elf_make_dynamic_reloc_section
2260 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2261 abfd, /*rela?*/ TRUE);
2262
2263 if (sreloc == NULL)
2264 goto error_return;
2265 }
2266
2267 /* If this is a global symbol, we count the number of
2268 relocations we need for this symbol. */
2269 if (h != NULL)
2270 head = &eh->dyn_relocs;
2271 else
2272 {
2273 /* Track dynamic relocs needed for local syms too.
2274 We really need local syms available to do this
2275 easily. Oh well. */
2276 asection *s;
2277 void **vpp;
2278
2279 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2280 abfd, r_symndx);
2281 if (isym == NULL)
2282 goto error_return;
2283
2284 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2285 if (s == NULL)
2286 s = sec;
2287
2288 /* Beware of type punned pointers vs strict aliasing
2289 rules. */
2290 vpp = &(elf_section_data (s)->local_dynrel);
2291 head = (struct elf_dyn_relocs **)vpp;
2292 }
2293
2294 p = *head;
2295 if (p == NULL || p->sec != sec)
2296 {
2297 size_t amt = sizeof *p;
2298
2299 p = ((struct elf_dyn_relocs *)
2300 bfd_alloc (htab->elf.dynobj, amt));
2301 if (p == NULL)
2302 goto error_return;
2303 p->next = *head;
2304 *head = p;
2305 p->sec = sec;
2306 p->count = 0;
2307 p->pc_count = 0;
2308 }
2309
2310 p->count += 1;
2311 /* Count size relocation as PC-relative relocation. */
2312 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2313 p->pc_count += 1;
2314 }
2315 break;
2316
2317 /* This relocation describes the C++ object vtable hierarchy.
2318 Reconstruct it for later use during GC. */
2319 case R_X86_64_GNU_VTINHERIT:
2320 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2321 goto error_return;
2322 break;
2323
2324 /* This relocation describes which C++ vtable entries are actually
2325 used. Record for later use during GC. */
2326 case R_X86_64_GNU_VTENTRY:
2327 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2328 goto error_return;
2329 break;
2330
2331 default:
2332 break;
2333 }
2334 }
2335
2336 if (elf_section_data (sec)->this_hdr.contents != contents)
2337 {
2338 if (!converted && !info->keep_memory)
2339 free (contents);
2340 else
2341 {
2342 /* Cache the section contents for elf_link_input_bfd if any
2343 load is converted or --no-keep-memory isn't used. */
2344 elf_section_data (sec)->this_hdr.contents = contents;
2345 }
2346 }
2347
2348 /* Cache relocations if any load is converted. */
2349 if (elf_section_data (sec)->relocs != relocs && converted)
2350 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2351
2352 return TRUE;
2353
2354 error_return:
2355 if (elf_section_data (sec)->this_hdr.contents != contents)
2356 free (contents);
2357 sec->check_relocs_failed = 1;
2358 return FALSE;
2359 }
2360
2361 /* Return the relocation value for @tpoff relocation
2362 if STT_TLS virtual address is ADDRESS. */
2363
2364 static bfd_vma
2365 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2366 {
2367 struct elf_link_hash_table *htab = elf_hash_table (info);
2368 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2369 bfd_vma static_tls_size;
2370
2371 /* If tls_segment is NULL, we should have signalled an error already. */
2372 if (htab->tls_sec == NULL)
2373 return 0;
2374
2375 /* Consider special static TLS alignment requirements. */
2376 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2377 return address - static_tls_size - htab->tls_sec->vma;
2378 }
2379
2380 /* Relocate an x86_64 ELF section. */
2381
2382 static bfd_boolean
2383 elf_x86_64_relocate_section (bfd *output_bfd,
2384 struct bfd_link_info *info,
2385 bfd *input_bfd,
2386 asection *input_section,
2387 bfd_byte *contents,
2388 Elf_Internal_Rela *relocs,
2389 Elf_Internal_Sym *local_syms,
2390 asection **local_sections)
2391 {
2392 struct elf_x86_link_hash_table *htab;
2393 Elf_Internal_Shdr *symtab_hdr;
2394 struct elf_link_hash_entry **sym_hashes;
2395 bfd_vma *local_got_offsets;
2396 bfd_vma *local_tlsdesc_gotents;
2397 Elf_Internal_Rela *rel;
2398 Elf_Internal_Rela *wrel;
2399 Elf_Internal_Rela *relend;
2400 unsigned int plt_entry_size;
2401
2402 /* Skip if check_relocs failed. */
2403 if (input_section->check_relocs_failed)
2404 return FALSE;
2405
2406 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2407 if (htab == NULL)
2408 return FALSE;
2409
2410 if (!is_x86_elf (input_bfd, htab))
2411 {
2412 bfd_set_error (bfd_error_wrong_format);
2413 return FALSE;
2414 }
2415
2416 plt_entry_size = htab->plt.plt_entry_size;
2417 symtab_hdr = &elf_symtab_hdr (input_bfd);
2418 sym_hashes = elf_sym_hashes (input_bfd);
2419 local_got_offsets = elf_local_got_offsets (input_bfd);
2420 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2421
2422 _bfd_x86_elf_set_tls_module_base (info);
2423
2424 rel = wrel = relocs;
2425 relend = relocs + input_section->reloc_count;
2426 for (; rel < relend; wrel++, rel++)
2427 {
2428 unsigned int r_type, r_type_tls;
2429 reloc_howto_type *howto;
2430 unsigned long r_symndx;
2431 struct elf_link_hash_entry *h;
2432 struct elf_x86_link_hash_entry *eh;
2433 Elf_Internal_Sym *sym;
2434 asection *sec;
2435 bfd_vma off, offplt, plt_offset;
2436 bfd_vma relocation;
2437 bfd_boolean unresolved_reloc;
2438 bfd_reloc_status_type r;
2439 int tls_type;
2440 asection *base_got, *resolved_plt;
2441 bfd_vma st_size;
2442 bfd_boolean resolved_to_zero;
2443 bfd_boolean relative_reloc;
2444 bfd_boolean converted_reloc;
2445 bfd_boolean need_copy_reloc_in_pie;
2446 bfd_boolean no_copyreloc_p;
2447
2448 r_type = ELF32_R_TYPE (rel->r_info);
2449 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2450 || r_type == (int) R_X86_64_GNU_VTENTRY)
2451 {
2452 if (wrel != rel)
2453 *wrel = *rel;
2454 continue;
2455 }
2456
2457 r_symndx = htab->r_sym (rel->r_info);
2458 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2459 if (converted_reloc)
2460 {
2461 r_type &= ~R_X86_64_converted_reloc_bit;
2462 rel->r_info = htab->r_info (r_symndx, r_type);
2463 }
2464
2465 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2466 if (howto == NULL)
2467 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2468
2469 h = NULL;
2470 sym = NULL;
2471 sec = NULL;
2472 unresolved_reloc = FALSE;
2473 if (r_symndx < symtab_hdr->sh_info)
2474 {
2475 sym = local_syms + r_symndx;
2476 sec = local_sections[r_symndx];
2477
2478 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2479 &sec, rel);
2480 st_size = sym->st_size;
2481
2482 /* Relocate against local STT_GNU_IFUNC symbol. */
2483 if (!bfd_link_relocatable (info)
2484 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2485 {
2486 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2487 rel, FALSE);
2488 if (h == NULL)
2489 abort ();
2490
2491 /* Set STT_GNU_IFUNC symbol value. */
2492 h->root.u.def.value = sym->st_value;
2493 h->root.u.def.section = sec;
2494 }
2495 }
2496 else
2497 {
2498 bfd_boolean warned ATTRIBUTE_UNUSED;
2499 bfd_boolean ignored ATTRIBUTE_UNUSED;
2500
2501 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2502 r_symndx, symtab_hdr, sym_hashes,
2503 h, sec, relocation,
2504 unresolved_reloc, warned, ignored);
2505 st_size = h->size;
2506 }
2507
2508 if (sec != NULL && discarded_section (sec))
2509 {
2510 _bfd_clear_contents (howto, input_bfd, input_section,
2511 contents, rel->r_offset);
2512 wrel->r_offset = rel->r_offset;
2513 wrel->r_info = 0;
2514 wrel->r_addend = 0;
2515
2516 /* For ld -r, remove relocations in debug sections against
2517 sections defined in discarded sections. Not done for
2518 eh_frame editing code expects to be present. */
2519 if (bfd_link_relocatable (info)
2520 && (input_section->flags & SEC_DEBUGGING))
2521 wrel--;
2522
2523 continue;
2524 }
2525
2526 if (bfd_link_relocatable (info))
2527 {
2528 if (wrel != rel)
2529 *wrel = *rel;
2530 continue;
2531 }
2532
2533 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2534 {
2535 if (r_type == R_X86_64_64)
2536 {
2537 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2538 zero-extend it to 64bit if addend is zero. */
2539 r_type = R_X86_64_32;
2540 memset (contents + rel->r_offset + 4, 0, 4);
2541 }
2542 else if (r_type == R_X86_64_SIZE64)
2543 {
2544 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2545 zero-extend it to 64bit if addend is zero. */
2546 r_type = R_X86_64_SIZE32;
2547 memset (contents + rel->r_offset + 4, 0, 4);
2548 }
2549 }
2550
2551 eh = (struct elf_x86_link_hash_entry *) h;
2552
2553 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2554 it here if it is defined in a non-shared object. */
2555 if (h != NULL
2556 && h->type == STT_GNU_IFUNC
2557 && h->def_regular)
2558 {
2559 bfd_vma plt_index;
2560 const char *name;
2561
2562 if ((input_section->flags & SEC_ALLOC) == 0)
2563 {
2564 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2565 STT_GNU_IFUNC symbol as STT_FUNC. */
2566 if (elf_section_type (input_section) == SHT_NOTE)
2567 goto skip_ifunc;
2568 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2569 sections because such sections are not SEC_ALLOC and
2570 thus ld.so will not process them. */
2571 if ((input_section->flags & SEC_DEBUGGING) != 0)
2572 continue;
2573 abort ();
2574 }
2575
2576 switch (r_type)
2577 {
2578 default:
2579 break;
2580
2581 case R_X86_64_GOTPCREL:
2582 case R_X86_64_GOTPCRELX:
2583 case R_X86_64_REX_GOTPCRELX:
2584 case R_X86_64_GOTPCREL64:
2585 base_got = htab->elf.sgot;
2586 off = h->got.offset;
2587
2588 if (base_got == NULL)
2589 abort ();
2590
2591 if (off == (bfd_vma) -1)
2592 {
2593 /* We can't use h->got.offset here to save state, or
2594 even just remember the offset, as finish_dynamic_symbol
2595 would use that as offset into .got. */
2596
2597 if (h->plt.offset == (bfd_vma) -1)
2598 abort ();
2599
2600 if (htab->elf.splt != NULL)
2601 {
2602 plt_index = (h->plt.offset / plt_entry_size
2603 - htab->plt.has_plt0);
2604 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2605 base_got = htab->elf.sgotplt;
2606 }
2607 else
2608 {
2609 plt_index = h->plt.offset / plt_entry_size;
2610 off = plt_index * GOT_ENTRY_SIZE;
2611 base_got = htab->elf.igotplt;
2612 }
2613
2614 if (h->dynindx == -1
2615 || h->forced_local
2616 || info->symbolic)
2617 {
2618 /* This references the local defitionion. We must
2619 initialize this entry in the global offset table.
2620 Since the offset must always be a multiple of 8,
2621 we use the least significant bit to record
2622 whether we have initialized it already.
2623
2624 When doing a dynamic link, we create a .rela.got
2625 relocation entry to initialize the value. This
2626 is done in the finish_dynamic_symbol routine. */
2627 if ((off & 1) != 0)
2628 off &= ~1;
2629 else
2630 {
2631 bfd_put_64 (output_bfd, relocation,
2632 base_got->contents + off);
2633 /* Note that this is harmless for the GOTPLT64
2634 case, as -1 | 1 still is -1. */
2635 h->got.offset |= 1;
2636 }
2637 }
2638 }
2639
2640 relocation = (base_got->output_section->vma
2641 + base_got->output_offset + off);
2642
2643 goto do_relocation;
2644 }
2645
2646 if (h->plt.offset == (bfd_vma) -1)
2647 {
2648 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2649 if (r_type == htab->pointer_r_type
2650 && (input_section->flags & SEC_CODE) == 0)
2651 goto do_ifunc_pointer;
2652 goto bad_ifunc_reloc;
2653 }
2654
2655 /* STT_GNU_IFUNC symbol must go through PLT. */
2656 if (htab->elf.splt != NULL)
2657 {
2658 if (htab->plt_second != NULL)
2659 {
2660 resolved_plt = htab->plt_second;
2661 plt_offset = eh->plt_second.offset;
2662 }
2663 else
2664 {
2665 resolved_plt = htab->elf.splt;
2666 plt_offset = h->plt.offset;
2667 }
2668 }
2669 else
2670 {
2671 resolved_plt = htab->elf.iplt;
2672 plt_offset = h->plt.offset;
2673 }
2674
2675 relocation = (resolved_plt->output_section->vma
2676 + resolved_plt->output_offset + plt_offset);
2677
2678 switch (r_type)
2679 {
2680 default:
2681 bad_ifunc_reloc:
2682 if (h->root.root.string)
2683 name = h->root.root.string;
2684 else
2685 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2686 NULL);
2687 _bfd_error_handler
2688 /* xgettext:c-format */
2689 (_("%pB: relocation %s against STT_GNU_IFUNC "
2690 "symbol `%s' isn't supported"), input_bfd,
2691 howto->name, name);
2692 bfd_set_error (bfd_error_bad_value);
2693 return FALSE;
2694
2695 case R_X86_64_32S:
2696 if (bfd_link_pic (info))
2697 abort ();
2698 goto do_relocation;
2699
2700 case R_X86_64_32:
2701 if (ABI_64_P (output_bfd))
2702 goto do_relocation;
2703 /* FALLTHROUGH */
2704 case R_X86_64_64:
2705 do_ifunc_pointer:
2706 if (rel->r_addend != 0)
2707 {
2708 if (h->root.root.string)
2709 name = h->root.root.string;
2710 else
2711 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2712 sym, NULL);
2713 _bfd_error_handler
2714 /* xgettext:c-format */
2715 (_("%pB: relocation %s against STT_GNU_IFUNC "
2716 "symbol `%s' has non-zero addend: %" PRId64),
2717 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2718 bfd_set_error (bfd_error_bad_value);
2719 return FALSE;
2720 }
2721
2722 /* Generate dynamic relcoation only when there is a
2723 non-GOT reference in a shared object or there is no
2724 PLT. */
2725 if ((bfd_link_pic (info) && h->non_got_ref)
2726 || h->plt.offset == (bfd_vma) -1)
2727 {
2728 Elf_Internal_Rela outrel;
2729 asection *sreloc;
2730
2731 /* Need a dynamic relocation to get the real function
2732 address. */
2733 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2734 info,
2735 input_section,
2736 rel->r_offset);
2737 if (outrel.r_offset == (bfd_vma) -1
2738 || outrel.r_offset == (bfd_vma) -2)
2739 abort ();
2740
2741 outrel.r_offset += (input_section->output_section->vma
2742 + input_section->output_offset);
2743
2744 if (POINTER_LOCAL_IFUNC_P (info, h))
2745 {
2746 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2747 h->root.root.string,
2748 h->root.u.def.section->owner);
2749
2750 /* This symbol is resolved locally. */
2751 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2752 outrel.r_addend = (h->root.u.def.value
2753 + h->root.u.def.section->output_section->vma
2754 + h->root.u.def.section->output_offset);
2755 }
2756 else
2757 {
2758 outrel.r_info = htab->r_info (h->dynindx, r_type);
2759 outrel.r_addend = 0;
2760 }
2761
2762 /* Dynamic relocations are stored in
2763 1. .rela.ifunc section in PIC object.
2764 2. .rela.got section in dynamic executable.
2765 3. .rela.iplt section in static executable. */
2766 if (bfd_link_pic (info))
2767 sreloc = htab->elf.irelifunc;
2768 else if (htab->elf.splt != NULL)
2769 sreloc = htab->elf.srelgot;
2770 else
2771 sreloc = htab->elf.irelplt;
2772 elf_append_rela (output_bfd, sreloc, &outrel);
2773
2774 /* If this reloc is against an external symbol, we
2775 do not want to fiddle with the addend. Otherwise,
2776 we need to include the symbol value so that it
2777 becomes an addend for the dynamic reloc. For an
2778 internal symbol, we have updated addend. */
2779 continue;
2780 }
2781 /* FALLTHROUGH */
2782 case R_X86_64_PC32:
2783 case R_X86_64_PC32_BND:
2784 case R_X86_64_PC64:
2785 case R_X86_64_PLT32:
2786 case R_X86_64_PLT32_BND:
2787 goto do_relocation;
2788 }
2789 }
2790
2791 skip_ifunc:
2792 resolved_to_zero = (eh != NULL
2793 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2794
2795 /* When generating a shared object, the relocations handled here are
2796 copied into the output file to be resolved at run time. */
2797 switch (r_type)
2798 {
2799 case R_X86_64_GOT32:
2800 case R_X86_64_GOT64:
2801 /* Relocation is to the entry for this symbol in the global
2802 offset table. */
2803 case R_X86_64_GOTPCREL:
2804 case R_X86_64_GOTPCRELX:
2805 case R_X86_64_REX_GOTPCRELX:
2806 case R_X86_64_GOTPCREL64:
2807 /* Use global offset table entry as symbol value. */
2808 case R_X86_64_GOTPLT64:
2809 /* This is obsolete and treated the same as GOT64. */
2810 base_got = htab->elf.sgot;
2811
2812 if (htab->elf.sgot == NULL)
2813 abort ();
2814
2815 relative_reloc = FALSE;
2816 if (h != NULL)
2817 {
2818 off = h->got.offset;
2819 if (h->needs_plt
2820 && h->plt.offset != (bfd_vma)-1
2821 && off == (bfd_vma)-1)
2822 {
2823 /* We can't use h->got.offset here to save
2824 state, or even just remember the offset, as
2825 finish_dynamic_symbol would use that as offset into
2826 .got. */
2827 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2828 - htab->plt.has_plt0);
2829 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2830 base_got = htab->elf.sgotplt;
2831 }
2832
2833 if (RESOLVED_LOCALLY_P (info, h, htab))
2834 {
2835 /* We must initialize this entry in the global offset
2836 table. Since the offset must always be a multiple
2837 of 8, we use the least significant bit to record
2838 whether we have initialized it already.
2839
2840 When doing a dynamic link, we create a .rela.got
2841 relocation entry to initialize the value. This is
2842 done in the finish_dynamic_symbol routine. */
2843 if ((off & 1) != 0)
2844 off &= ~1;
2845 else
2846 {
2847 bfd_put_64 (output_bfd, relocation,
2848 base_got->contents + off);
2849 /* Note that this is harmless for the GOTPLT64 case,
2850 as -1 | 1 still is -1. */
2851 h->got.offset |= 1;
2852
2853 if (GENERATE_RELATIVE_RELOC_P (info, h))
2854 {
2855 /* If this symbol isn't dynamic in PIC,
2856 generate R_X86_64_RELATIVE here. */
2857 eh->no_finish_dynamic_symbol = 1;
2858 relative_reloc = TRUE;
2859 }
2860 }
2861 }
2862 else
2863 unresolved_reloc = FALSE;
2864 }
2865 else
2866 {
2867 if (local_got_offsets == NULL)
2868 abort ();
2869
2870 off = local_got_offsets[r_symndx];
2871
2872 /* The offset must always be a multiple of 8. We use
2873 the least significant bit to record whether we have
2874 already generated the necessary reloc. */
2875 if ((off & 1) != 0)
2876 off &= ~1;
2877 else
2878 {
2879 bfd_put_64 (output_bfd, relocation,
2880 base_got->contents + off);
2881 local_got_offsets[r_symndx] |= 1;
2882
2883 if (bfd_link_pic (info))
2884 relative_reloc = TRUE;
2885 }
2886 }
2887
2888 if (relative_reloc)
2889 {
2890 asection *s;
2891 Elf_Internal_Rela outrel;
2892
2893 /* We need to generate a R_X86_64_RELATIVE reloc
2894 for the dynamic linker. */
2895 s = htab->elf.srelgot;
2896 if (s == NULL)
2897 abort ();
2898
2899 outrel.r_offset = (base_got->output_section->vma
2900 + base_got->output_offset
2901 + off);
2902 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2903 outrel.r_addend = relocation;
2904 elf_append_rela (output_bfd, s, &outrel);
2905 }
2906
2907 if (off >= (bfd_vma) -2)
2908 abort ();
2909
2910 relocation = base_got->output_section->vma
2911 + base_got->output_offset + off;
2912 if (r_type != R_X86_64_GOTPCREL
2913 && r_type != R_X86_64_GOTPCRELX
2914 && r_type != R_X86_64_REX_GOTPCRELX
2915 && r_type != R_X86_64_GOTPCREL64)
2916 relocation -= htab->elf.sgotplt->output_section->vma
2917 - htab->elf.sgotplt->output_offset;
2918
2919 break;
2920
2921 case R_X86_64_GOTOFF64:
2922 /* Relocation is relative to the start of the global offset
2923 table. */
2924
2925 /* Check to make sure it isn't a protected function or data
2926 symbol for shared library since it may not be local when
2927 used as function address or with copy relocation. We also
2928 need to make sure that a symbol is referenced locally. */
2929 if (bfd_link_pic (info) && h)
2930 {
2931 if (!h->def_regular)
2932 {
2933 const char *v;
2934
2935 switch (ELF_ST_VISIBILITY (h->other))
2936 {
2937 case STV_HIDDEN:
2938 v = _("hidden symbol");
2939 break;
2940 case STV_INTERNAL:
2941 v = _("internal symbol");
2942 break;
2943 case STV_PROTECTED:
2944 v = _("protected symbol");
2945 break;
2946 default:
2947 v = _("symbol");
2948 break;
2949 }
2950
2951 _bfd_error_handler
2952 /* xgettext:c-format */
2953 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2954 " `%s' can not be used when making a shared object"),
2955 input_bfd, v, h->root.root.string);
2956 bfd_set_error (bfd_error_bad_value);
2957 return FALSE;
2958 }
2959 else if (!bfd_link_executable (info)
2960 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2961 && (h->type == STT_FUNC
2962 || h->type == STT_OBJECT)
2963 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2964 {
2965 _bfd_error_handler
2966 /* xgettext:c-format */
2967 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2968 " `%s' can not be used when making a shared object"),
2969 input_bfd,
2970 h->type == STT_FUNC ? "function" : "data",
2971 h->root.root.string);
2972 bfd_set_error (bfd_error_bad_value);
2973 return FALSE;
2974 }
2975 }
2976
2977 /* Note that sgot is not involved in this
2978 calculation. We always want the start of .got.plt. If we
2979 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2980 permitted by the ABI, we might have to change this
2981 calculation. */
2982 relocation -= htab->elf.sgotplt->output_section->vma
2983 + htab->elf.sgotplt->output_offset;
2984 break;
2985
2986 case R_X86_64_GOTPC32:
2987 case R_X86_64_GOTPC64:
2988 /* Use global offset table as symbol value. */
2989 relocation = htab->elf.sgotplt->output_section->vma
2990 + htab->elf.sgotplt->output_offset;
2991 unresolved_reloc = FALSE;
2992 break;
2993
2994 case R_X86_64_PLTOFF64:
2995 /* Relocation is PLT entry relative to GOT. For local
2996 symbols it's the symbol itself relative to GOT. */
2997 if (h != NULL
2998 /* See PLT32 handling. */
2999 && (h->plt.offset != (bfd_vma) -1
3000 || eh->plt_got.offset != (bfd_vma) -1)
3001 && htab->elf.splt != NULL)
3002 {
3003 if (eh->plt_got.offset != (bfd_vma) -1)
3004 {
3005 /* Use the GOT PLT. */
3006 resolved_plt = htab->plt_got;
3007 plt_offset = eh->plt_got.offset;
3008 }
3009 else if (htab->plt_second != NULL)
3010 {
3011 resolved_plt = htab->plt_second;
3012 plt_offset = eh->plt_second.offset;
3013 }
3014 else
3015 {
3016 resolved_plt = htab->elf.splt;
3017 plt_offset = h->plt.offset;
3018 }
3019
3020 relocation = (resolved_plt->output_section->vma
3021 + resolved_plt->output_offset
3022 + plt_offset);
3023 unresolved_reloc = FALSE;
3024 }
3025
3026 relocation -= htab->elf.sgotplt->output_section->vma
3027 + htab->elf.sgotplt->output_offset;
3028 break;
3029
3030 case R_X86_64_PLT32:
3031 case R_X86_64_PLT32_BND:
3032 /* Relocation is to the entry for this symbol in the
3033 procedure linkage table. */
3034
3035 /* Resolve a PLT32 reloc against a local symbol directly,
3036 without using the procedure linkage table. */
3037 if (h == NULL)
3038 break;
3039
3040 if ((h->plt.offset == (bfd_vma) -1
3041 && eh->plt_got.offset == (bfd_vma) -1)
3042 || htab->elf.splt == NULL)
3043 {
3044 /* We didn't make a PLT entry for this symbol. This
3045 happens when statically linking PIC code, or when
3046 using -Bsymbolic. */
3047 break;
3048 }
3049
3050 use_plt:
3051 if (h->plt.offset != (bfd_vma) -1)
3052 {
3053 if (htab->plt_second != NULL)
3054 {
3055 resolved_plt = htab->plt_second;
3056 plt_offset = eh->plt_second.offset;
3057 }
3058 else
3059 {
3060 resolved_plt = htab->elf.splt;
3061 plt_offset = h->plt.offset;
3062 }
3063 }
3064 else
3065 {
3066 /* Use the GOT PLT. */
3067 resolved_plt = htab->plt_got;
3068 plt_offset = eh->plt_got.offset;
3069 }
3070
3071 relocation = (resolved_plt->output_section->vma
3072 + resolved_plt->output_offset
3073 + plt_offset);
3074 unresolved_reloc = FALSE;
3075 break;
3076
3077 case R_X86_64_SIZE32:
3078 case R_X86_64_SIZE64:
3079 /* Set to symbol size. */
3080 relocation = st_size;
3081 goto direct;
3082
3083 case R_X86_64_PC8:
3084 case R_X86_64_PC16:
3085 case R_X86_64_PC32:
3086 case R_X86_64_PC32_BND:
3087 /* Don't complain about -fPIC if the symbol is undefined when
3088 building executable unless it is unresolved weak symbol,
3089 references a dynamic definition in PIE or -z nocopyreloc
3090 is used. */
3091 no_copyreloc_p
3092 = (info->nocopyreloc
3093 || (h != NULL
3094 && !h->root.linker_def
3095 && !h->root.ldscript_def
3096 && eh->def_protected
3097 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3098
3099 if ((input_section->flags & SEC_ALLOC) != 0
3100 && (input_section->flags & SEC_READONLY) != 0
3101 && h != NULL
3102 && ((bfd_link_executable (info)
3103 && ((h->root.type == bfd_link_hash_undefweak
3104 && (eh == NULL
3105 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3106 eh)))
3107 || (bfd_link_pie (info)
3108 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3109 && h->def_dynamic)
3110 || (no_copyreloc_p
3111 && h->def_dynamic
3112 && !(h->root.u.def.section->flags & SEC_CODE))))
3113 || bfd_link_dll (info)))
3114 {
3115 bfd_boolean fail = FALSE;
3116 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3117 {
3118 /* Symbol is referenced locally. Make sure it is
3119 defined locally. */
3120 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3121 }
3122 else if (bfd_link_pie (info))
3123 {
3124 /* We can only use PC-relative relocations in PIE
3125 from non-code sections. */
3126 if (h->type == STT_FUNC
3127 && (sec->flags & SEC_CODE) != 0)
3128 fail = TRUE;
3129 }
3130 else if (no_copyreloc_p || bfd_link_dll (info))
3131 {
3132 /* Symbol doesn't need copy reloc and isn't
3133 referenced locally. Don't allow PC-relative
3134 relocations against default and protected
3135 symbols since address of protected function
3136 and location of protected data may not be in
3137 the shared object. */
3138 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3139 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3140 }
3141
3142 if (fail)
3143 return elf_x86_64_need_pic (info, input_bfd, input_section,
3144 h, NULL, NULL, howto);
3145 }
3146 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3147 as function address. */
3148 else if (h != NULL
3149 && (input_section->flags & SEC_CODE) == 0
3150 && bfd_link_pie (info)
3151 && h->type == STT_FUNC
3152 && !h->def_regular
3153 && h->def_dynamic)
3154 goto use_plt;
3155 /* Fall through. */
3156
3157 case R_X86_64_8:
3158 case R_X86_64_16:
3159 case R_X86_64_32:
3160 case R_X86_64_PC64:
3161 case R_X86_64_64:
3162 /* FIXME: The ABI says the linker should make sure the value is
3163 the same when it's zeroextended to 64 bit. */
3164
3165 direct:
3166 if ((input_section->flags & SEC_ALLOC) == 0)
3167 break;
3168
3169 need_copy_reloc_in_pie = (bfd_link_pie (info)
3170 && h != NULL
3171 && (h->needs_copy
3172 || eh->needs_copy
3173 || (h->root.type
3174 == bfd_link_hash_undefined))
3175 && (X86_PCREL_TYPE_P (r_type)
3176 || X86_SIZE_TYPE_P (r_type)));
3177
3178 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3179 need_copy_reloc_in_pie,
3180 resolved_to_zero, FALSE))
3181 {
3182 Elf_Internal_Rela outrel;
3183 bfd_boolean skip, relocate;
3184 asection *sreloc;
3185
3186 /* When generating a shared object, these relocations
3187 are copied into the output file to be resolved at run
3188 time. */
3189 skip = FALSE;
3190 relocate = FALSE;
3191
3192 outrel.r_offset =
3193 _bfd_elf_section_offset (output_bfd, info, input_section,
3194 rel->r_offset);
3195 if (outrel.r_offset == (bfd_vma) -1)
3196 skip = TRUE;
3197 else if (outrel.r_offset == (bfd_vma) -2)
3198 skip = TRUE, relocate = TRUE;
3199
3200 outrel.r_offset += (input_section->output_section->vma
3201 + input_section->output_offset);
3202
3203 if (skip)
3204 memset (&outrel, 0, sizeof outrel);
3205
3206 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3207 {
3208 outrel.r_info = htab->r_info (h->dynindx, r_type);
3209 outrel.r_addend = rel->r_addend;
3210 }
3211 else
3212 {
3213 /* This symbol is local, or marked to become local.
3214 When relocation overflow check is disabled, we
3215 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3216 if (r_type == htab->pointer_r_type
3217 || (r_type == R_X86_64_32
3218 && htab->params->no_reloc_overflow_check))
3219 {
3220 relocate = TRUE;
3221 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3222 outrel.r_addend = relocation + rel->r_addend;
3223 }
3224 else if (r_type == R_X86_64_64
3225 && !ABI_64_P (output_bfd))
3226 {
3227 relocate = TRUE;
3228 outrel.r_info = htab->r_info (0,
3229 R_X86_64_RELATIVE64);
3230 outrel.r_addend = relocation + rel->r_addend;
3231 /* Check addend overflow. */
3232 if ((outrel.r_addend & 0x80000000)
3233 != (rel->r_addend & 0x80000000))
3234 {
3235 const char *name;
3236 int addend = rel->r_addend;
3237 if (h && h->root.root.string)
3238 name = h->root.root.string;
3239 else
3240 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3241 sym, NULL);
3242 _bfd_error_handler
3243 /* xgettext:c-format */
3244 (_("%pB: addend %s%#x in relocation %s against "
3245 "symbol `%s' at %#" PRIx64
3246 " in section `%pA' is out of range"),
3247 input_bfd, addend < 0 ? "-" : "", addend,
3248 howto->name, name, (uint64_t) rel->r_offset,
3249 input_section);
3250 bfd_set_error (bfd_error_bad_value);
3251 return FALSE;
3252 }
3253 }
3254 else
3255 {
3256 long sindx;
3257
3258 if (bfd_is_abs_section (sec))
3259 sindx = 0;
3260 else if (sec == NULL || sec->owner == NULL)
3261 {
3262 bfd_set_error (bfd_error_bad_value);
3263 return FALSE;
3264 }
3265 else
3266 {
3267 asection *osec;
3268
3269 /* We are turning this relocation into one
3270 against a section symbol. It would be
3271 proper to subtract the symbol's value,
3272 osec->vma, from the emitted reloc addend,
3273 but ld.so expects buggy relocs. */
3274 osec = sec->output_section;
3275 sindx = elf_section_data (osec)->dynindx;
3276 if (sindx == 0)
3277 {
3278 asection *oi = htab->elf.text_index_section;
3279 sindx = elf_section_data (oi)->dynindx;
3280 }
3281 BFD_ASSERT (sindx != 0);
3282 }
3283
3284 outrel.r_info = htab->r_info (sindx, r_type);
3285 outrel.r_addend = relocation + rel->r_addend;
3286 }
3287 }
3288
3289 sreloc = elf_section_data (input_section)->sreloc;
3290
3291 if (sreloc == NULL || sreloc->contents == NULL)
3292 {
3293 r = bfd_reloc_notsupported;
3294 goto check_relocation_error;
3295 }
3296
3297 elf_append_rela (output_bfd, sreloc, &outrel);
3298
3299 /* If this reloc is against an external symbol, we do
3300 not want to fiddle with the addend. Otherwise, we
3301 need to include the symbol value so that it becomes
3302 an addend for the dynamic reloc. */
3303 if (! relocate)
3304 continue;
3305 }
3306
3307 break;
3308
3309 case R_X86_64_TLSGD:
3310 case R_X86_64_GOTPC32_TLSDESC:
3311 case R_X86_64_TLSDESC_CALL:
3312 case R_X86_64_GOTTPOFF:
3313 tls_type = GOT_UNKNOWN;
3314 if (h == NULL && local_got_offsets)
3315 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3316 else if (h != NULL)
3317 tls_type = elf_x86_hash_entry (h)->tls_type;
3318
3319 r_type_tls = r_type;
3320 if (! elf_x86_64_tls_transition (info, input_bfd,
3321 input_section, contents,
3322 symtab_hdr, sym_hashes,
3323 &r_type_tls, tls_type, rel,
3324 relend, h, r_symndx, TRUE))
3325 return FALSE;
3326
3327 if (r_type_tls == R_X86_64_TPOFF32)
3328 {
3329 bfd_vma roff = rel->r_offset;
3330
3331 BFD_ASSERT (! unresolved_reloc);
3332
3333 if (r_type == R_X86_64_TLSGD)
3334 {
3335 /* GD->LE transition. For 64bit, change
3336 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3337 .word 0x6666; rex64; call __tls_get_addr@PLT
3338 or
3339 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3340 .byte 0x66; rex64
3341 call *__tls_get_addr@GOTPCREL(%rip)
3342 which may be converted to
3343 addr32 call __tls_get_addr
3344 into:
3345 movq %fs:0, %rax
3346 leaq foo@tpoff(%rax), %rax
3347 For 32bit, change
3348 leaq foo@tlsgd(%rip), %rdi
3349 .word 0x6666; rex64; call __tls_get_addr@PLT
3350 or
3351 leaq foo@tlsgd(%rip), %rdi
3352 .byte 0x66; rex64
3353 call *__tls_get_addr@GOTPCREL(%rip)
3354 which may be converted to
3355 addr32 call __tls_get_addr
3356 into:
3357 movl %fs:0, %eax
3358 leaq foo@tpoff(%rax), %rax
3359 For largepic, change:
3360 leaq foo@tlsgd(%rip), %rdi
3361 movabsq $__tls_get_addr@pltoff, %rax
3362 addq %r15, %rax
3363 call *%rax
3364 into:
3365 movq %fs:0, %rax
3366 leaq foo@tpoff(%rax), %rax
3367 nopw 0x0(%rax,%rax,1) */
3368 int largepic = 0;
3369 if (ABI_64_P (output_bfd))
3370 {
3371 if (contents[roff + 5] == 0xb8)
3372 {
3373 if (roff < 3
3374 || (roff - 3 + 22) > input_section->size)
3375 {
3376 corrupt_input:
3377 info->callbacks->einfo
3378 (_("%F%P: corrupt input: %pB\n"),
3379 input_bfd);
3380 return FALSE;
3381 }
3382 memcpy (contents + roff - 3,
3383 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3384 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3385 largepic = 1;
3386 }
3387 else
3388 {
3389 if (roff < 4
3390 || (roff - 4 + 16) > input_section->size)
3391 goto corrupt_input;
3392 memcpy (contents + roff - 4,
3393 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3394 16);
3395 }
3396 }
3397 else
3398 {
3399 if (roff < 3
3400 || (roff - 3 + 15) > input_section->size)
3401 goto corrupt_input;
3402 memcpy (contents + roff - 3,
3403 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3404 15);
3405 }
3406 bfd_put_32 (output_bfd,
3407 elf_x86_64_tpoff (info, relocation),
3408 contents + roff + 8 + largepic);
3409 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3410 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3411 rel++;
3412 wrel++;
3413 continue;
3414 }
3415 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3416 {
3417 /* GDesc -> LE transition.
3418 It's originally something like:
3419 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3420 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3421
3422 Change it to:
3423 movq $x@tpoff, %rax <--- LP64 mode.
3424 rex movl $x@tpoff, %eax <--- X32 mode.
3425 */
3426
3427 unsigned int val, type;
3428
3429 if (roff < 3)
3430 goto corrupt_input;
3431 type = bfd_get_8 (input_bfd, contents + roff - 3);
3432 val = bfd_get_8 (input_bfd, contents + roff - 1);
3433 bfd_put_8 (output_bfd,
3434 (type & 0x48) | ((type >> 2) & 1),
3435 contents + roff - 3);
3436 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3437 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3438 contents + roff - 1);
3439 bfd_put_32 (output_bfd,
3440 elf_x86_64_tpoff (info, relocation),
3441 contents + roff);
3442 continue;
3443 }
3444 else if (r_type == R_X86_64_TLSDESC_CALL)
3445 {
3446 /* GDesc -> LE transition.
3447 It's originally:
3448 call *(%rax) <--- LP64 mode.
3449 call *(%eax) <--- X32 mode.
3450 Turn it into:
3451 xchg %ax,%ax <-- LP64 mode.
3452 nopl (%rax) <-- X32 mode.
3453 */
3454 unsigned int prefix = 0;
3455 if (!ABI_64_P (input_bfd))
3456 {
3457 /* Check for call *x@tlsdesc(%eax). */
3458 if (contents[roff] == 0x67)
3459 prefix = 1;
3460 }
3461 if (prefix)
3462 {
3463 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3464 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3465 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3466 }
3467 else
3468 {
3469 bfd_put_8 (output_bfd, 0x66, contents + roff);
3470 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3471 }
3472 continue;
3473 }
3474 else if (r_type == R_X86_64_GOTTPOFF)
3475 {
3476 /* IE->LE transition:
3477 For 64bit, originally it can be one of:
3478 movq foo@gottpoff(%rip), %reg
3479 addq foo@gottpoff(%rip), %reg
3480 We change it into:
3481 movq $foo, %reg
3482 leaq foo(%reg), %reg
3483 addq $foo, %reg.
3484 For 32bit, originally it can be one of:
3485 movq foo@gottpoff(%rip), %reg
3486 addl foo@gottpoff(%rip), %reg
3487 We change it into:
3488 movq $foo, %reg
3489 leal foo(%reg), %reg
3490 addl $foo, %reg. */
3491
3492 unsigned int val, type, reg;
3493
3494 if (roff >= 3)
3495 val = bfd_get_8 (input_bfd, contents + roff - 3);
3496 else
3497 {
3498 if (roff < 2)
3499 goto corrupt_input;
3500 val = 0;
3501 }
3502 type = bfd_get_8 (input_bfd, contents + roff - 2);
3503 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3504 reg >>= 3;
3505 if (type == 0x8b)
3506 {
3507 /* movq */
3508 if (val == 0x4c)
3509 {
3510 if (roff < 3)
3511 goto corrupt_input;
3512 bfd_put_8 (output_bfd, 0x49,
3513 contents + roff - 3);
3514 }
3515 else if (!ABI_64_P (output_bfd) && val == 0x44)
3516 {
3517 if (roff < 3)
3518 goto corrupt_input;
3519 bfd_put_8 (output_bfd, 0x41,
3520 contents + roff - 3);
3521 }
3522 bfd_put_8 (output_bfd, 0xc7,
3523 contents + roff - 2);
3524 bfd_put_8 (output_bfd, 0xc0 | reg,
3525 contents + roff - 1);
3526 }
3527 else if (reg == 4)
3528 {
3529 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3530 is special */
3531 if (val == 0x4c)
3532 {
3533 if (roff < 3)
3534 goto corrupt_input;
3535 bfd_put_8 (output_bfd, 0x49,
3536 contents + roff - 3);
3537 }
3538 else if (!ABI_64_P (output_bfd) && val == 0x44)
3539 {
3540 if (roff < 3)
3541 goto corrupt_input;
3542 bfd_put_8 (output_bfd, 0x41,
3543 contents + roff - 3);
3544 }
3545 bfd_put_8 (output_bfd, 0x81,
3546 contents + roff - 2);
3547 bfd_put_8 (output_bfd, 0xc0 | reg,
3548 contents + roff - 1);
3549 }
3550 else
3551 {
3552 /* addq/addl -> leaq/leal */
3553 if (val == 0x4c)
3554 {
3555 if (roff < 3)
3556 goto corrupt_input;
3557 bfd_put_8 (output_bfd, 0x4d,
3558 contents + roff - 3);
3559 }
3560 else if (!ABI_64_P (output_bfd) && val == 0x44)
3561 {
3562 if (roff < 3)
3563 goto corrupt_input;
3564 bfd_put_8 (output_bfd, 0x45,
3565 contents + roff - 3);
3566 }
3567 bfd_put_8 (output_bfd, 0x8d,
3568 contents + roff - 2);
3569 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3570 contents + roff - 1);
3571 }
3572 bfd_put_32 (output_bfd,
3573 elf_x86_64_tpoff (info, relocation),
3574 contents + roff);
3575 continue;
3576 }
3577 else
3578 BFD_ASSERT (FALSE);
3579 }
3580
3581 if (htab->elf.sgot == NULL)
3582 abort ();
3583
3584 if (h != NULL)
3585 {
3586 off = h->got.offset;
3587 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3588 }
3589 else
3590 {
3591 if (local_got_offsets == NULL)
3592 abort ();
3593
3594 off = local_got_offsets[r_symndx];
3595 offplt = local_tlsdesc_gotents[r_symndx];
3596 }
3597
3598 if ((off & 1) != 0)
3599 off &= ~1;
3600 else
3601 {
3602 Elf_Internal_Rela outrel;
3603 int dr_type, indx;
3604 asection *sreloc;
3605
3606 if (htab->elf.srelgot == NULL)
3607 abort ();
3608
3609 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3610
3611 if (GOT_TLS_GDESC_P (tls_type))
3612 {
3613 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3614 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3615 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3616 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3617 + htab->elf.sgotplt->output_offset
3618 + offplt
3619 + htab->sgotplt_jump_table_size);
3620 sreloc = htab->elf.srelplt;
3621 if (indx == 0)
3622 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3623 else
3624 outrel.r_addend = 0;
3625 elf_append_rela (output_bfd, sreloc, &outrel);
3626 }
3627
3628 sreloc = htab->elf.srelgot;
3629
3630 outrel.r_offset = (htab->elf.sgot->output_section->vma
3631 + htab->elf.sgot->output_offset + off);
3632
3633 if (GOT_TLS_GD_P (tls_type))
3634 dr_type = R_X86_64_DTPMOD64;
3635 else if (GOT_TLS_GDESC_P (tls_type))
3636 goto dr_done;
3637 else
3638 dr_type = R_X86_64_TPOFF64;
3639
3640 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3641 outrel.r_addend = 0;
3642 if ((dr_type == R_X86_64_TPOFF64
3643 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3644 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3645 outrel.r_info = htab->r_info (indx, dr_type);
3646
3647 elf_append_rela (output_bfd, sreloc, &outrel);
3648
3649 if (GOT_TLS_GD_P (tls_type))
3650 {
3651 if (indx == 0)
3652 {
3653 BFD_ASSERT (! unresolved_reloc);
3654 bfd_put_64 (output_bfd,
3655 relocation - _bfd_x86_elf_dtpoff_base (info),
3656 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3657 }
3658 else
3659 {
3660 bfd_put_64 (output_bfd, 0,
3661 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3662 outrel.r_info = htab->r_info (indx,
3663 R_X86_64_DTPOFF64);
3664 outrel.r_offset += GOT_ENTRY_SIZE;
3665 elf_append_rela (output_bfd, sreloc,
3666 &outrel);
3667 }
3668 }
3669
3670 dr_done:
3671 if (h != NULL)
3672 h->got.offset |= 1;
3673 else
3674 local_got_offsets[r_symndx] |= 1;
3675 }
3676
3677 if (off >= (bfd_vma) -2
3678 && ! GOT_TLS_GDESC_P (tls_type))
3679 abort ();
3680 if (r_type_tls == r_type)
3681 {
3682 if (r_type == R_X86_64_GOTPC32_TLSDESC
3683 || r_type == R_X86_64_TLSDESC_CALL)
3684 relocation = htab->elf.sgotplt->output_section->vma
3685 + htab->elf.sgotplt->output_offset
3686 + offplt + htab->sgotplt_jump_table_size;
3687 else
3688 relocation = htab->elf.sgot->output_section->vma
3689 + htab->elf.sgot->output_offset + off;
3690 unresolved_reloc = FALSE;
3691 }
3692 else
3693 {
3694 bfd_vma roff = rel->r_offset;
3695
3696 if (r_type == R_X86_64_TLSGD)
3697 {
3698 /* GD->IE transition. For 64bit, change
3699 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3700 .word 0x6666; rex64; call __tls_get_addr@PLT
3701 or
3702 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3703 .byte 0x66; rex64
3704 call *__tls_get_addr@GOTPCREL(%rip
3705 which may be converted to
3706 addr32 call __tls_get_addr
3707 into:
3708 movq %fs:0, %rax
3709 addq foo@gottpoff(%rip), %rax
3710 For 32bit, change
3711 leaq foo@tlsgd(%rip), %rdi
3712 .word 0x6666; rex64; call __tls_get_addr@PLT
3713 or
3714 leaq foo@tlsgd(%rip), %rdi
3715 .byte 0x66; rex64;
3716 call *__tls_get_addr@GOTPCREL(%rip)
3717 which may be converted to
3718 addr32 call __tls_get_addr
3719 into:
3720 movl %fs:0, %eax
3721 addq foo@gottpoff(%rip), %rax
3722 For largepic, change:
3723 leaq foo@tlsgd(%rip), %rdi
3724 movabsq $__tls_get_addr@pltoff, %rax
3725 addq %r15, %rax
3726 call *%rax
3727 into:
3728 movq %fs:0, %rax
3729 addq foo@gottpoff(%rax), %rax
3730 nopw 0x0(%rax,%rax,1) */
3731 int largepic = 0;
3732 if (ABI_64_P (output_bfd))
3733 {
3734 if (contents[roff + 5] == 0xb8)
3735 {
3736 if (roff < 3
3737 || (roff - 3 + 22) > input_section->size)
3738 goto corrupt_input;
3739 memcpy (contents + roff - 3,
3740 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3741 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3742 largepic = 1;
3743 }
3744 else
3745 {
3746 if (roff < 4
3747 || (roff - 4 + 16) > input_section->size)
3748 goto corrupt_input;
3749 memcpy (contents + roff - 4,
3750 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3751 16);
3752 }
3753 }
3754 else
3755 {
3756 if (roff < 3
3757 || (roff - 3 + 15) > input_section->size)
3758 goto corrupt_input;
3759 memcpy (contents + roff - 3,
3760 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3761 15);
3762 }
3763
3764 relocation = (htab->elf.sgot->output_section->vma
3765 + htab->elf.sgot->output_offset + off
3766 - roff
3767 - largepic
3768 - input_section->output_section->vma
3769 - input_section->output_offset
3770 - 12);
3771 bfd_put_32 (output_bfd, relocation,
3772 contents + roff + 8 + largepic);
3773 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3774 rel++;
3775 wrel++;
3776 continue;
3777 }
3778 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3779 {
3780 /* GDesc -> IE transition.
3781 It's originally something like:
3782 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3783 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3784
3785 Change it to:
3786 # before xchg %ax,%ax in LP64 mode.
3787 movq x@gottpoff(%rip), %rax
3788 # before nopl (%rax) in X32 mode.
3789 rex movl x@gottpoff(%rip), %eax
3790 */
3791
3792 /* Now modify the instruction as appropriate. To
3793 turn a lea into a mov in the form we use it, it
3794 suffices to change the second byte from 0x8d to
3795 0x8b. */
3796 if (roff < 2)
3797 goto corrupt_input;
3798 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3799
3800 bfd_put_32 (output_bfd,
3801 htab->elf.sgot->output_section->vma
3802 + htab->elf.sgot->output_offset + off
3803 - rel->r_offset
3804 - input_section->output_section->vma
3805 - input_section->output_offset
3806 - 4,
3807 contents + roff);
3808 continue;
3809 }
3810 else if (r_type == R_X86_64_TLSDESC_CALL)
3811 {
3812 /* GDesc -> IE transition.
3813 It's originally:
3814 call *(%rax) <--- LP64 mode.
3815 call *(%eax) <--- X32 mode.
3816
3817 Change it to:
3818 xchg %ax, %ax <-- LP64 mode.
3819 nopl (%rax) <-- X32 mode.
3820 */
3821
3822 unsigned int prefix = 0;
3823 if (!ABI_64_P (input_bfd))
3824 {
3825 /* Check for call *x@tlsdesc(%eax). */
3826 if (contents[roff] == 0x67)
3827 prefix = 1;
3828 }
3829 if (prefix)
3830 {
3831 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3832 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3833 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3834 }
3835 else
3836 {
3837 bfd_put_8 (output_bfd, 0x66, contents + roff);
3838 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3839 }
3840 continue;
3841 }
3842 else
3843 BFD_ASSERT (FALSE);
3844 }
3845 break;
3846
3847 case R_X86_64_TLSLD:
3848 if (! elf_x86_64_tls_transition (info, input_bfd,
3849 input_section, contents,
3850 symtab_hdr, sym_hashes,
3851 &r_type, GOT_UNKNOWN, rel,
3852 relend, h, r_symndx, TRUE))
3853 return FALSE;
3854
3855 if (r_type != R_X86_64_TLSLD)
3856 {
3857 /* LD->LE transition:
3858 leaq foo@tlsld(%rip), %rdi
3859 call __tls_get_addr@PLT
3860 For 64bit, we change it into:
3861 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3862 For 32bit, we change it into:
3863 nopl 0x0(%rax); movl %fs:0, %eax
3864 Or
3865 leaq foo@tlsld(%rip), %rdi;
3866 call *__tls_get_addr@GOTPCREL(%rip)
3867 which may be converted to
3868 addr32 call __tls_get_addr
3869 For 64bit, we change it into:
3870 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3871 For 32bit, we change it into:
3872 nopw 0x0(%rax); movl %fs:0, %eax
3873 For largepic, change:
3874 leaq foo@tlsgd(%rip), %rdi
3875 movabsq $__tls_get_addr@pltoff, %rax
3876 addq %rbx, %rax
3877 call *%rax
3878 into
3879 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3880 movq %fs:0, %eax */
3881
3882 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3883 if (ABI_64_P (output_bfd))
3884 {
3885 if ((rel->r_offset + 5) >= input_section->size)
3886 goto corrupt_input;
3887 if (contents[rel->r_offset + 5] == 0xb8)
3888 {
3889 if (rel->r_offset < 3
3890 || (rel->r_offset - 3 + 22) > input_section->size)
3891 goto corrupt_input;
3892 memcpy (contents + rel->r_offset - 3,
3893 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3894 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3895 }
3896 else if (contents[rel->r_offset + 4] == 0xff
3897 || contents[rel->r_offset + 4] == 0x67)
3898 {
3899 if (rel->r_offset < 3
3900 || (rel->r_offset - 3 + 13) > input_section->size)
3901 goto corrupt_input;
3902 memcpy (contents + rel->r_offset - 3,
3903 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3904 13);
3905
3906 }
3907 else
3908 {
3909 if (rel->r_offset < 3
3910 || (rel->r_offset - 3 + 12) > input_section->size)
3911 goto corrupt_input;
3912 memcpy (contents + rel->r_offset - 3,
3913 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3914 }
3915 }
3916 else
3917 {
3918 if ((rel->r_offset + 4) >= input_section->size)
3919 goto corrupt_input;
3920 if (contents[rel->r_offset + 4] == 0xff)
3921 {
3922 if (rel->r_offset < 3
3923 || (rel->r_offset - 3 + 13) > input_section->size)
3924 goto corrupt_input;
3925 memcpy (contents + rel->r_offset - 3,
3926 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3927 13);
3928 }
3929 else
3930 {
3931 if (rel->r_offset < 3
3932 || (rel->r_offset - 3 + 12) > input_section->size)
3933 goto corrupt_input;
3934 memcpy (contents + rel->r_offset - 3,
3935 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3936 }
3937 }
3938 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3939 and R_X86_64_PLTOFF64. */
3940 rel++;
3941 wrel++;
3942 continue;
3943 }
3944
3945 if (htab->elf.sgot == NULL)
3946 abort ();
3947
3948 off = htab->tls_ld_or_ldm_got.offset;
3949 if (off & 1)
3950 off &= ~1;
3951 else
3952 {
3953 Elf_Internal_Rela outrel;
3954
3955 if (htab->elf.srelgot == NULL)
3956 abort ();
3957
3958 outrel.r_offset = (htab->elf.sgot->output_section->vma
3959 + htab->elf.sgot->output_offset + off);
3960
3961 bfd_put_64 (output_bfd, 0,
3962 htab->elf.sgot->contents + off);
3963 bfd_put_64 (output_bfd, 0,
3964 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3965 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3966 outrel.r_addend = 0;
3967 elf_append_rela (output_bfd, htab->elf.srelgot,
3968 &outrel);
3969 htab->tls_ld_or_ldm_got.offset |= 1;
3970 }
3971 relocation = htab->elf.sgot->output_section->vma
3972 + htab->elf.sgot->output_offset + off;
3973 unresolved_reloc = FALSE;
3974 break;
3975
3976 case R_X86_64_DTPOFF32:
3977 if (!bfd_link_executable (info)
3978 || (input_section->flags & SEC_CODE) == 0)
3979 relocation -= _bfd_x86_elf_dtpoff_base (info);
3980 else
3981 relocation = elf_x86_64_tpoff (info, relocation);
3982 break;
3983
3984 case R_X86_64_TPOFF32:
3985 case R_X86_64_TPOFF64:
3986 BFD_ASSERT (bfd_link_executable (info));
3987 relocation = elf_x86_64_tpoff (info, relocation);
3988 break;
3989
3990 case R_X86_64_DTPOFF64:
3991 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3992 relocation -= _bfd_x86_elf_dtpoff_base (info);
3993 break;
3994
3995 default:
3996 break;
3997 }
3998
3999 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4000 because such sections are not SEC_ALLOC and thus ld.so will
4001 not process them. */
4002 if (unresolved_reloc
4003 && !((input_section->flags & SEC_DEBUGGING) != 0
4004 && h->def_dynamic)
4005 && _bfd_elf_section_offset (output_bfd, info, input_section,
4006 rel->r_offset) != (bfd_vma) -1)
4007 {
4008 switch (r_type)
4009 {
4010 case R_X86_64_32S:
4011 sec = h->root.u.def.section;
4012 if ((info->nocopyreloc
4013 || (eh->def_protected
4014 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4015 && !(h->root.u.def.section->flags & SEC_CODE))
4016 return elf_x86_64_need_pic (info, input_bfd, input_section,
4017 h, NULL, NULL, howto);
4018 /* Fall through. */
4019
4020 default:
4021 _bfd_error_handler
4022 /* xgettext:c-format */
4023 (_("%pB(%pA+%#" PRIx64 "): "
4024 "unresolvable %s relocation against symbol `%s'"),
4025 input_bfd,
4026 input_section,
4027 (uint64_t) rel->r_offset,
4028 howto->name,
4029 h->root.root.string);
4030 return FALSE;
4031 }
4032 }
4033
4034 do_relocation:
4035 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4036 contents, rel->r_offset,
4037 relocation, rel->r_addend);
4038
4039 check_relocation_error:
4040 if (r != bfd_reloc_ok)
4041 {
4042 const char *name;
4043
4044 if (h != NULL)
4045 name = h->root.root.string;
4046 else
4047 {
4048 name = bfd_elf_string_from_elf_section (input_bfd,
4049 symtab_hdr->sh_link,
4050 sym->st_name);
4051 if (name == NULL)
4052 return FALSE;
4053 if (*name == '\0')
4054 name = bfd_section_name (sec);
4055 }
4056
4057 if (r == bfd_reloc_overflow)
4058 {
4059 if (converted_reloc)
4060 {
4061 info->callbacks->einfo
4062 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4063 return FALSE;
4064 }
4065 (*info->callbacks->reloc_overflow)
4066 (info, (h ? &h->root : NULL), name, howto->name,
4067 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4068 }
4069 else
4070 {
4071 _bfd_error_handler
4072 /* xgettext:c-format */
4073 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4074 input_bfd, input_section,
4075 (uint64_t) rel->r_offset, name, (int) r);
4076 return FALSE;
4077 }
4078 }
4079
4080 if (wrel != rel)
4081 *wrel = *rel;
4082 }
4083
4084 if (wrel != rel)
4085 {
4086 Elf_Internal_Shdr *rel_hdr;
4087 size_t deleted = rel - wrel;
4088
4089 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4090 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4091 if (rel_hdr->sh_size == 0)
4092 {
4093 /* It is too late to remove an empty reloc section. Leave
4094 one NONE reloc.
4095 ??? What is wrong with an empty section??? */
4096 rel_hdr->sh_size = rel_hdr->sh_entsize;
4097 deleted -= 1;
4098 }
4099 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4100 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4101 input_section->reloc_count -= deleted;
4102 }
4103
4104 return TRUE;
4105 }
4106
4107 /* Finish up dynamic symbol handling. We set the contents of various
4108 dynamic sections here. */
4109
4110 static bfd_boolean
4111 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4112 struct bfd_link_info *info,
4113 struct elf_link_hash_entry *h,
4114 Elf_Internal_Sym *sym)
4115 {
4116 struct elf_x86_link_hash_table *htab;
4117 bfd_boolean use_plt_second;
4118 struct elf_x86_link_hash_entry *eh;
4119 bfd_boolean local_undefweak;
4120
4121 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4122 if (htab == NULL)
4123 return FALSE;
4124
4125 /* Use the second PLT section only if there is .plt section. */
4126 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4127
4128 eh = (struct elf_x86_link_hash_entry *) h;
4129 if (eh->no_finish_dynamic_symbol)
4130 abort ();
4131
4132 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4133 resolved undefined weak symbols in executable so that their
4134 references have value 0 at run-time. */
4135 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4136
4137 if (h->plt.offset != (bfd_vma) -1)
4138 {
4139 bfd_vma plt_index;
4140 bfd_vma got_offset, plt_offset;
4141 Elf_Internal_Rela rela;
4142 bfd_byte *loc;
4143 asection *plt, *gotplt, *relplt, *resolved_plt;
4144 const struct elf_backend_data *bed;
4145 bfd_vma plt_got_pcrel_offset;
4146
4147 /* When building a static executable, use .iplt, .igot.plt and
4148 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4149 if (htab->elf.splt != NULL)
4150 {
4151 plt = htab->elf.splt;
4152 gotplt = htab->elf.sgotplt;
4153 relplt = htab->elf.srelplt;
4154 }
4155 else
4156 {
4157 plt = htab->elf.iplt;
4158 gotplt = htab->elf.igotplt;
4159 relplt = htab->elf.irelplt;
4160 }
4161
4162 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4163
4164 /* Get the index in the procedure linkage table which
4165 corresponds to this symbol. This is the index of this symbol
4166 in all the symbols for which we are making plt entries. The
4167 first entry in the procedure linkage table is reserved.
4168
4169 Get the offset into the .got table of the entry that
4170 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4171 bytes. The first three are reserved for the dynamic linker.
4172
4173 For static executables, we don't reserve anything. */
4174
4175 if (plt == htab->elf.splt)
4176 {
4177 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4178 - htab->plt.has_plt0);
4179 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4180 }
4181 else
4182 {
4183 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4184 got_offset = got_offset * GOT_ENTRY_SIZE;
4185 }
4186
4187 /* Fill in the entry in the procedure linkage table. */
4188 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4189 htab->plt.plt_entry_size);
4190 if (use_plt_second)
4191 {
4192 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4193 htab->non_lazy_plt->plt_entry,
4194 htab->non_lazy_plt->plt_entry_size);
4195
4196 resolved_plt = htab->plt_second;
4197 plt_offset = eh->plt_second.offset;
4198 }
4199 else
4200 {
4201 resolved_plt = plt;
4202 plt_offset = h->plt.offset;
4203 }
4204
4205 /* Insert the relocation positions of the plt section. */
4206
4207 /* Put offset the PC-relative instruction referring to the GOT entry,
4208 subtracting the size of that instruction. */
4209 plt_got_pcrel_offset = (gotplt->output_section->vma
4210 + gotplt->output_offset
4211 + got_offset
4212 - resolved_plt->output_section->vma
4213 - resolved_plt->output_offset
4214 - plt_offset
4215 - htab->plt.plt_got_insn_size);
4216
4217 /* Check PC-relative offset overflow in PLT entry. */
4218 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4219 /* xgettext:c-format */
4220 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4221 output_bfd, h->root.root.string);
4222
4223 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4224 (resolved_plt->contents + plt_offset
4225 + htab->plt.plt_got_offset));
4226
4227 /* Fill in the entry in the global offset table, initially this
4228 points to the second part of the PLT entry. Leave the entry
4229 as zero for undefined weak symbol in PIE. No PLT relocation
4230 against undefined weak symbol in PIE. */
4231 if (!local_undefweak)
4232 {
4233 if (htab->plt.has_plt0)
4234 bfd_put_64 (output_bfd, (plt->output_section->vma
4235 + plt->output_offset
4236 + h->plt.offset
4237 + htab->lazy_plt->plt_lazy_offset),
4238 gotplt->contents + got_offset);
4239
4240 /* Fill in the entry in the .rela.plt section. */
4241 rela.r_offset = (gotplt->output_section->vma
4242 + gotplt->output_offset
4243 + got_offset);
4244 if (PLT_LOCAL_IFUNC_P (info, h))
4245 {
4246 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4247 h->root.root.string,
4248 h->root.u.def.section->owner);
4249
4250 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4251 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4252 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4253 rela.r_addend = (h->root.u.def.value
4254 + h->root.u.def.section->output_section->vma
4255 + h->root.u.def.section->output_offset);
4256 /* R_X86_64_IRELATIVE comes last. */
4257 plt_index = htab->next_irelative_index--;
4258 }
4259 else
4260 {
4261 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4262 rela.r_addend = 0;
4263 plt_index = htab->next_jump_slot_index++;
4264 }
4265
4266 /* Don't fill the second and third slots in PLT entry for
4267 static executables nor without PLT0. */
4268 if (plt == htab->elf.splt && htab->plt.has_plt0)
4269 {
4270 bfd_vma plt0_offset
4271 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4272
4273 /* Put relocation index. */
4274 bfd_put_32 (output_bfd, plt_index,
4275 (plt->contents + h->plt.offset
4276 + htab->lazy_plt->plt_reloc_offset));
4277
4278 /* Put offset for jmp .PLT0 and check for overflow. We don't
4279 check relocation index for overflow since branch displacement
4280 will overflow first. */
4281 if (plt0_offset > 0x80000000)
4282 /* xgettext:c-format */
4283 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4284 output_bfd, h->root.root.string);
4285 bfd_put_32 (output_bfd, - plt0_offset,
4286 (plt->contents + h->plt.offset
4287 + htab->lazy_plt->plt_plt_offset));
4288 }
4289
4290 bed = get_elf_backend_data (output_bfd);
4291 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4292 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4293 }
4294 }
4295 else if (eh->plt_got.offset != (bfd_vma) -1)
4296 {
4297 bfd_vma got_offset, plt_offset;
4298 asection *plt, *got;
4299 bfd_boolean got_after_plt;
4300 int32_t got_pcrel_offset;
4301
4302 /* Set the entry in the GOT procedure linkage table. */
4303 plt = htab->plt_got;
4304 got = htab->elf.sgot;
4305 got_offset = h->got.offset;
4306
4307 if (got_offset == (bfd_vma) -1
4308 || (h->type == STT_GNU_IFUNC && h->def_regular)
4309 || plt == NULL
4310 || got == NULL)
4311 abort ();
4312
4313 /* Use the non-lazy PLT entry template for the GOT PLT since they
4314 are the identical. */
4315 /* Fill in the entry in the GOT procedure linkage table. */
4316 plt_offset = eh->plt_got.offset;
4317 memcpy (plt->contents + plt_offset,
4318 htab->non_lazy_plt->plt_entry,
4319 htab->non_lazy_plt->plt_entry_size);
4320
4321 /* Put offset the PC-relative instruction referring to the GOT
4322 entry, subtracting the size of that instruction. */
4323 got_pcrel_offset = (got->output_section->vma
4324 + got->output_offset
4325 + got_offset
4326 - plt->output_section->vma
4327 - plt->output_offset
4328 - plt_offset
4329 - htab->non_lazy_plt->plt_got_insn_size);
4330
4331 /* Check PC-relative offset overflow in GOT PLT entry. */
4332 got_after_plt = got->output_section->vma > plt->output_section->vma;
4333 if ((got_after_plt && got_pcrel_offset < 0)
4334 || (!got_after_plt && got_pcrel_offset > 0))
4335 /* xgettext:c-format */
4336 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4337 output_bfd, h->root.root.string);
4338
4339 bfd_put_32 (output_bfd, got_pcrel_offset,
4340 (plt->contents + plt_offset
4341 + htab->non_lazy_plt->plt_got_offset));
4342 }
4343
4344 if (!local_undefweak
4345 && !h->def_regular
4346 && (h->plt.offset != (bfd_vma) -1
4347 || eh->plt_got.offset != (bfd_vma) -1))
4348 {
4349 /* Mark the symbol as undefined, rather than as defined in
4350 the .plt section. Leave the value if there were any
4351 relocations where pointer equality matters (this is a clue
4352 for the dynamic linker, to make function pointer
4353 comparisons work between an application and shared
4354 library), otherwise set it to zero. If a function is only
4355 called from a binary, there is no need to slow down
4356 shared libraries because of that. */
4357 sym->st_shndx = SHN_UNDEF;
4358 if (!h->pointer_equality_needed)
4359 sym->st_value = 0;
4360 }
4361
4362 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4363
4364 /* Don't generate dynamic GOT relocation against undefined weak
4365 symbol in executable. */
4366 if (h->got.offset != (bfd_vma) -1
4367 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4368 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4369 && !local_undefweak)
4370 {
4371 Elf_Internal_Rela rela;
4372 asection *relgot = htab->elf.srelgot;
4373
4374 /* This symbol has an entry in the global offset table. Set it
4375 up. */
4376 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4377 abort ();
4378
4379 rela.r_offset = (htab->elf.sgot->output_section->vma
4380 + htab->elf.sgot->output_offset
4381 + (h->got.offset &~ (bfd_vma) 1));
4382
4383 /* If this is a static link, or it is a -Bsymbolic link and the
4384 symbol is defined locally or was forced to be local because
4385 of a version file, we just want to emit a RELATIVE reloc.
4386 The entry in the global offset table will already have been
4387 initialized in the relocate_section function. */
4388 if (h->def_regular
4389 && h->type == STT_GNU_IFUNC)
4390 {
4391 if (h->plt.offset == (bfd_vma) -1)
4392 {
4393 /* STT_GNU_IFUNC is referenced without PLT. */
4394 if (htab->elf.splt == NULL)
4395 {
4396 /* use .rel[a].iplt section to store .got relocations
4397 in static executable. */
4398 relgot = htab->elf.irelplt;
4399 }
4400 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4401 {
4402 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4403 h->root.root.string,
4404 h->root.u.def.section->owner);
4405
4406 rela.r_info = htab->r_info (0,
4407 R_X86_64_IRELATIVE);
4408 rela.r_addend = (h->root.u.def.value
4409 + h->root.u.def.section->output_section->vma
4410 + h->root.u.def.section->output_offset);
4411 }
4412 else
4413 goto do_glob_dat;
4414 }
4415 else if (bfd_link_pic (info))
4416 {
4417 /* Generate R_X86_64_GLOB_DAT. */
4418 goto do_glob_dat;
4419 }
4420 else
4421 {
4422 asection *plt;
4423 bfd_vma plt_offset;
4424
4425 if (!h->pointer_equality_needed)
4426 abort ();
4427
4428 /* For non-shared object, we can't use .got.plt, which
4429 contains the real function addres if we need pointer
4430 equality. We load the GOT entry with the PLT entry. */
4431 if (htab->plt_second != NULL)
4432 {
4433 plt = htab->plt_second;
4434 plt_offset = eh->plt_second.offset;
4435 }
4436 else
4437 {
4438 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4439 plt_offset = h->plt.offset;
4440 }
4441 bfd_put_64 (output_bfd, (plt->output_section->vma
4442 + plt->output_offset
4443 + plt_offset),
4444 htab->elf.sgot->contents + h->got.offset);
4445 return TRUE;
4446 }
4447 }
4448 else if (bfd_link_pic (info)
4449 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4450 {
4451 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4452 return FALSE;
4453 BFD_ASSERT((h->got.offset & 1) != 0);
4454 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4455 rela.r_addend = (h->root.u.def.value
4456 + h->root.u.def.section->output_section->vma
4457 + h->root.u.def.section->output_offset);
4458 }
4459 else
4460 {
4461 BFD_ASSERT((h->got.offset & 1) == 0);
4462 do_glob_dat:
4463 bfd_put_64 (output_bfd, (bfd_vma) 0,
4464 htab->elf.sgot->contents + h->got.offset);
4465 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4466 rela.r_addend = 0;
4467 }
4468
4469 elf_append_rela (output_bfd, relgot, &rela);
4470 }
4471
4472 if (h->needs_copy)
4473 {
4474 Elf_Internal_Rela rela;
4475 asection *s;
4476
4477 /* This symbol needs a copy reloc. Set it up. */
4478 VERIFY_COPY_RELOC (h, htab)
4479
4480 rela.r_offset = (h->root.u.def.value
4481 + h->root.u.def.section->output_section->vma
4482 + h->root.u.def.section->output_offset);
4483 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4484 rela.r_addend = 0;
4485 if (h->root.u.def.section == htab->elf.sdynrelro)
4486 s = htab->elf.sreldynrelro;
4487 else
4488 s = htab->elf.srelbss;
4489 elf_append_rela (output_bfd, s, &rela);
4490 }
4491
4492 return TRUE;
4493 }
4494
4495 /* Finish up local dynamic symbol handling. We set the contents of
4496 various dynamic sections here. */
4497
4498 static bfd_boolean
4499 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4500 {
4501 struct elf_link_hash_entry *h
4502 = (struct elf_link_hash_entry *) *slot;
4503 struct bfd_link_info *info
4504 = (struct bfd_link_info *) inf;
4505
4506 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4507 info, h, NULL);
4508 }
4509
4510 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4511 here since undefined weak symbol may not be dynamic and may not be
4512 called for elf_x86_64_finish_dynamic_symbol. */
4513
4514 static bfd_boolean
4515 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4516 void *inf)
4517 {
4518 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4519 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4520
4521 if (h->root.type != bfd_link_hash_undefweak
4522 || h->dynindx != -1)
4523 return TRUE;
4524
4525 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4526 info, h, NULL);
4527 }
4528
4529 /* Used to decide how to sort relocs in an optimal manner for the
4530 dynamic linker, before writing them out. */
4531
4532 static enum elf_reloc_type_class
4533 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4534 const asection *rel_sec ATTRIBUTE_UNUSED,
4535 const Elf_Internal_Rela *rela)
4536 {
4537 bfd *abfd = info->output_bfd;
4538 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4539 struct elf_x86_link_hash_table *htab
4540 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4541
4542 if (htab->elf.dynsym != NULL
4543 && htab->elf.dynsym->contents != NULL)
4544 {
4545 /* Check relocation against STT_GNU_IFUNC symbol if there are
4546 dynamic symbols. */
4547 unsigned long r_symndx = htab->r_sym (rela->r_info);
4548 if (r_symndx != STN_UNDEF)
4549 {
4550 Elf_Internal_Sym sym;
4551 if (!bed->s->swap_symbol_in (abfd,
4552 (htab->elf.dynsym->contents
4553 + r_symndx * bed->s->sizeof_sym),
4554 0, &sym))
4555 abort ();
4556
4557 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4558 return reloc_class_ifunc;
4559 }
4560 }
4561
4562 switch ((int) ELF32_R_TYPE (rela->r_info))
4563 {
4564 case R_X86_64_IRELATIVE:
4565 return reloc_class_ifunc;
4566 case R_X86_64_RELATIVE:
4567 case R_X86_64_RELATIVE64:
4568 return reloc_class_relative;
4569 case R_X86_64_JUMP_SLOT:
4570 return reloc_class_plt;
4571 case R_X86_64_COPY:
4572 return reloc_class_copy;
4573 default:
4574 return reloc_class_normal;
4575 }
4576 }
4577
4578 /* Finish up the dynamic sections. */
4579
4580 static bfd_boolean
4581 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4582 struct bfd_link_info *info)
4583 {
4584 struct elf_x86_link_hash_table *htab;
4585
4586 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4587 if (htab == NULL)
4588 return FALSE;
4589
4590 if (! htab->elf.dynamic_sections_created)
4591 return TRUE;
4592
4593 if (htab->elf.splt && htab->elf.splt->size > 0)
4594 {
4595 elf_section_data (htab->elf.splt->output_section)
4596 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4597
4598 if (htab->plt.has_plt0)
4599 {
4600 /* Fill in the special first entry in the procedure linkage
4601 table. */
4602 memcpy (htab->elf.splt->contents,
4603 htab->lazy_plt->plt0_entry,
4604 htab->lazy_plt->plt0_entry_size);
4605 /* Add offset for pushq GOT+8(%rip), since the instruction
4606 uses 6 bytes subtract this value. */
4607 bfd_put_32 (output_bfd,
4608 (htab->elf.sgotplt->output_section->vma
4609 + htab->elf.sgotplt->output_offset
4610 + 8
4611 - htab->elf.splt->output_section->vma
4612 - htab->elf.splt->output_offset
4613 - 6),
4614 (htab->elf.splt->contents
4615 + htab->lazy_plt->plt0_got1_offset));
4616 /* Add offset for the PC-relative instruction accessing
4617 GOT+16, subtracting the offset to the end of that
4618 instruction. */
4619 bfd_put_32 (output_bfd,
4620 (htab->elf.sgotplt->output_section->vma
4621 + htab->elf.sgotplt->output_offset
4622 + 16
4623 - htab->elf.splt->output_section->vma
4624 - htab->elf.splt->output_offset
4625 - htab->lazy_plt->plt0_got2_insn_end),
4626 (htab->elf.splt->contents
4627 + htab->lazy_plt->plt0_got2_offset));
4628 }
4629
4630 if (htab->tlsdesc_plt)
4631 {
4632 bfd_put_64 (output_bfd, (bfd_vma) 0,
4633 htab->elf.sgot->contents + htab->tlsdesc_got);
4634
4635 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4636 htab->lazy_plt->plt_tlsdesc_entry,
4637 htab->lazy_plt->plt_tlsdesc_entry_size);
4638
4639 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4640 bytes and the instruction uses 6 bytes, subtract these
4641 values. */
4642 bfd_put_32 (output_bfd,
4643 (htab->elf.sgotplt->output_section->vma
4644 + htab->elf.sgotplt->output_offset
4645 + 8
4646 - htab->elf.splt->output_section->vma
4647 - htab->elf.splt->output_offset
4648 - htab->tlsdesc_plt
4649 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4650 (htab->elf.splt->contents
4651 + htab->tlsdesc_plt
4652 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4653 /* Add offset for indirect branch via GOT+TDG, where TDG
4654 stands for htab->tlsdesc_got, subtracting the offset
4655 to the end of that instruction. */
4656 bfd_put_32 (output_bfd,
4657 (htab->elf.sgot->output_section->vma
4658 + htab->elf.sgot->output_offset
4659 + htab->tlsdesc_got
4660 - htab->elf.splt->output_section->vma
4661 - htab->elf.splt->output_offset
4662 - htab->tlsdesc_plt
4663 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4664 (htab->elf.splt->contents
4665 + htab->tlsdesc_plt
4666 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4667 }
4668 }
4669
4670 /* Fill PLT entries for undefined weak symbols in PIE. */
4671 if (bfd_link_pie (info))
4672 bfd_hash_traverse (&info->hash->table,
4673 elf_x86_64_pie_finish_undefweak_symbol,
4674 info);
4675
4676 return TRUE;
4677 }
4678
4679 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4680 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4681 It has to be done before elf_link_sort_relocs is called so that
4682 dynamic relocations are properly sorted. */
4683
4684 static bfd_boolean
4685 elf_x86_64_output_arch_local_syms
4686 (bfd *output_bfd ATTRIBUTE_UNUSED,
4687 struct bfd_link_info *info,
4688 void *flaginfo ATTRIBUTE_UNUSED,
4689 int (*func) (void *, const char *,
4690 Elf_Internal_Sym *,
4691 asection *,
4692 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4693 {
4694 struct elf_x86_link_hash_table *htab
4695 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4696 if (htab == NULL)
4697 return FALSE;
4698
4699 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4700 htab_traverse (htab->loc_hash_table,
4701 elf_x86_64_finish_local_dynamic_symbol,
4702 info);
4703
4704 return TRUE;
4705 }
4706
4707 /* Forward declaration. */
4708 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4709
4710 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4711 dynamic relocations. */
4712
4713 static long
4714 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4715 long symcount ATTRIBUTE_UNUSED,
4716 asymbol **syms ATTRIBUTE_UNUSED,
4717 long dynsymcount,
4718 asymbol **dynsyms,
4719 asymbol **ret)
4720 {
4721 long count, i, n;
4722 int j;
4723 bfd_byte *plt_contents;
4724 long relsize;
4725 const struct elf_x86_lazy_plt_layout *lazy_plt;
4726 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4727 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4728 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4729 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4730 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4731 asection *plt;
4732 enum elf_x86_plt_type plt_type;
4733 struct elf_x86_plt plts[] =
4734 {
4735 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4736 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4737 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4738 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4739 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4740 };
4741
4742 *ret = NULL;
4743
4744 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4745 return 0;
4746
4747 if (dynsymcount <= 0)
4748 return 0;
4749
4750 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4751 if (relsize <= 0)
4752 return -1;
4753
4754 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4755 {
4756 lazy_plt = &elf_x86_64_lazy_plt;
4757 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4758 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4759 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4760 if (ABI_64_P (abfd))
4761 {
4762 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4763 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4764 }
4765 else
4766 {
4767 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4768 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4769 }
4770 }
4771 else
4772 {
4773 lazy_plt = &elf_x86_64_nacl_plt;
4774 non_lazy_plt = NULL;
4775 lazy_bnd_plt = NULL;
4776 non_lazy_bnd_plt = NULL;
4777 lazy_ibt_plt = NULL;
4778 non_lazy_ibt_plt = NULL;
4779 }
4780
4781 count = 0;
4782 for (j = 0; plts[j].name != NULL; j++)
4783 {
4784 plt = bfd_get_section_by_name (abfd, plts[j].name);
4785 if (plt == NULL || plt->size == 0)
4786 continue;
4787
4788 /* Get the PLT section contents. */
4789 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4790 if (plt_contents == NULL)
4791 break;
4792 if (!bfd_get_section_contents (abfd, (asection *) plt,
4793 plt_contents, 0, plt->size))
4794 {
4795 free (plt_contents);
4796 break;
4797 }
4798
4799 /* Check what kind of PLT it is. */
4800 plt_type = plt_unknown;
4801 if (plts[j].type == plt_unknown
4802 && (plt->size >= (lazy_plt->plt_entry_size
4803 + lazy_plt->plt_entry_size)))
4804 {
4805 /* Match lazy PLT first. Need to check the first two
4806 instructions. */
4807 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4808 lazy_plt->plt0_got1_offset) == 0)
4809 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4810 2) == 0))
4811 plt_type = plt_lazy;
4812 else if (lazy_bnd_plt != NULL
4813 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4814 lazy_bnd_plt->plt0_got1_offset) == 0)
4815 && (memcmp (plt_contents + 6,
4816 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4817 {
4818 plt_type = plt_lazy | plt_second;
4819 /* The fist entry in the lazy IBT PLT is the same as the
4820 lazy BND PLT. */
4821 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4822 lazy_ibt_plt->plt_entry,
4823 lazy_ibt_plt->plt_got_offset) == 0))
4824 lazy_plt = lazy_ibt_plt;
4825 else
4826 lazy_plt = lazy_bnd_plt;
4827 }
4828 }
4829
4830 if (non_lazy_plt != NULL
4831 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4832 && plt->size >= non_lazy_plt->plt_entry_size)
4833 {
4834 /* Match non-lazy PLT. */
4835 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4836 non_lazy_plt->plt_got_offset) == 0)
4837 plt_type = plt_non_lazy;
4838 }
4839
4840 if (plt_type == plt_unknown || plt_type == plt_second)
4841 {
4842 if (non_lazy_bnd_plt != NULL
4843 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4844 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4845 non_lazy_bnd_plt->plt_got_offset) == 0))
4846 {
4847 /* Match BND PLT. */
4848 plt_type = plt_second;
4849 non_lazy_plt = non_lazy_bnd_plt;
4850 }
4851 else if (non_lazy_ibt_plt != NULL
4852 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4853 && (memcmp (plt_contents,
4854 non_lazy_ibt_plt->plt_entry,
4855 non_lazy_ibt_plt->plt_got_offset) == 0))
4856 {
4857 /* Match IBT PLT. */
4858 plt_type = plt_second;
4859 non_lazy_plt = non_lazy_ibt_plt;
4860 }
4861 }
4862
4863 if (plt_type == plt_unknown)
4864 {
4865 free (plt_contents);
4866 continue;
4867 }
4868
4869 plts[j].sec = plt;
4870 plts[j].type = plt_type;
4871
4872 if ((plt_type & plt_lazy))
4873 {
4874 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4875 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4876 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4877 /* Skip PLT0 in lazy PLT. */
4878 i = 1;
4879 }
4880 else
4881 {
4882 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4883 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4884 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4885 i = 0;
4886 }
4887
4888 /* Skip lazy PLT when the second PLT is used. */
4889 if (plt_type == (plt_lazy | plt_second))
4890 plts[j].count = 0;
4891 else
4892 {
4893 n = plt->size / plts[j].plt_entry_size;
4894 plts[j].count = n;
4895 count += n - i;
4896 }
4897
4898 plts[j].contents = plt_contents;
4899 }
4900
4901 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4902 (bfd_vma) 0, plts, dynsyms,
4903 ret);
4904 }
4905
4906 /* Handle an x86-64 specific section when reading an object file. This
4907 is called when elfcode.h finds a section with an unknown type. */
4908
4909 static bfd_boolean
4910 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4911 const char *name, int shindex)
4912 {
4913 if (hdr->sh_type != SHT_X86_64_UNWIND)
4914 return FALSE;
4915
4916 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4917 return FALSE;
4918
4919 return TRUE;
4920 }
4921
4922 /* Hook called by the linker routine which adds symbols from an object
4923 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4924 of .bss. */
4925
4926 static bfd_boolean
4927 elf_x86_64_add_symbol_hook (bfd *abfd,
4928 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4929 Elf_Internal_Sym *sym,
4930 const char **namep ATTRIBUTE_UNUSED,
4931 flagword *flagsp ATTRIBUTE_UNUSED,
4932 asection **secp,
4933 bfd_vma *valp)
4934 {
4935 asection *lcomm;
4936
4937 switch (sym->st_shndx)
4938 {
4939 case SHN_X86_64_LCOMMON:
4940 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4941 if (lcomm == NULL)
4942 {
4943 lcomm = bfd_make_section_with_flags (abfd,
4944 "LARGE_COMMON",
4945 (SEC_ALLOC
4946 | SEC_IS_COMMON
4947 | SEC_LINKER_CREATED));
4948 if (lcomm == NULL)
4949 return FALSE;
4950 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4951 }
4952 *secp = lcomm;
4953 *valp = sym->st_size;
4954 return TRUE;
4955 }
4956
4957 return TRUE;
4958 }
4959
4960
4961 /* Given a BFD section, try to locate the corresponding ELF section
4962 index. */
4963
4964 static bfd_boolean
4965 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4966 asection *sec, int *index_return)
4967 {
4968 if (sec == &_bfd_elf_large_com_section)
4969 {
4970 *index_return = SHN_X86_64_LCOMMON;
4971 return TRUE;
4972 }
4973 return FALSE;
4974 }
4975
4976 /* Process a symbol. */
4977
4978 static void
4979 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4980 asymbol *asym)
4981 {
4982 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4983
4984 switch (elfsym->internal_elf_sym.st_shndx)
4985 {
4986 case SHN_X86_64_LCOMMON:
4987 asym->section = &_bfd_elf_large_com_section;
4988 asym->value = elfsym->internal_elf_sym.st_size;
4989 /* Common symbol doesn't set BSF_GLOBAL. */
4990 asym->flags &= ~BSF_GLOBAL;
4991 break;
4992 }
4993 }
4994
4995 static bfd_boolean
4996 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4997 {
4998 return (sym->st_shndx == SHN_COMMON
4999 || sym->st_shndx == SHN_X86_64_LCOMMON);
5000 }
5001
5002 static unsigned int
5003 elf_x86_64_common_section_index (asection *sec)
5004 {
5005 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5006 return SHN_COMMON;
5007 else
5008 return SHN_X86_64_LCOMMON;
5009 }
5010
5011 static asection *
5012 elf_x86_64_common_section (asection *sec)
5013 {
5014 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5015 return bfd_com_section_ptr;
5016 else
5017 return &_bfd_elf_large_com_section;
5018 }
5019
5020 static bfd_boolean
5021 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5022 const Elf_Internal_Sym *sym,
5023 asection **psec,
5024 bfd_boolean newdef,
5025 bfd_boolean olddef,
5026 bfd *oldbfd,
5027 const asection *oldsec)
5028 {
5029 /* A normal common symbol and a large common symbol result in a
5030 normal common symbol. We turn the large common symbol into a
5031 normal one. */
5032 if (!olddef
5033 && h->root.type == bfd_link_hash_common
5034 && !newdef
5035 && bfd_is_com_section (*psec)
5036 && oldsec != *psec)
5037 {
5038 if (sym->st_shndx == SHN_COMMON
5039 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5040 {
5041 h->root.u.c.p->section
5042 = bfd_make_section_old_way (oldbfd, "COMMON");
5043 h->root.u.c.p->section->flags = SEC_ALLOC;
5044 }
5045 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5046 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5047 *psec = bfd_com_section_ptr;
5048 }
5049
5050 return TRUE;
5051 }
5052
5053 static int
5054 elf_x86_64_additional_program_headers (bfd *abfd,
5055 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5056 {
5057 asection *s;
5058 int count = 0;
5059
5060 /* Check to see if we need a large readonly segment. */
5061 s = bfd_get_section_by_name (abfd, ".lrodata");
5062 if (s && (s->flags & SEC_LOAD))
5063 count++;
5064
5065 /* Check to see if we need a large data segment. Since .lbss sections
5066 is placed right after the .bss section, there should be no need for
5067 a large data segment just because of .lbss. */
5068 s = bfd_get_section_by_name (abfd, ".ldata");
5069 if (s && (s->flags & SEC_LOAD))
5070 count++;
5071
5072 return count;
5073 }
5074
5075 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5076
5077 static bfd_boolean
5078 elf_x86_64_relocs_compatible (const bfd_target *input,
5079 const bfd_target *output)
5080 {
5081 return ((xvec_get_elf_backend_data (input)->s->elfclass
5082 == xvec_get_elf_backend_data (output)->s->elfclass)
5083 && _bfd_elf_relocs_compatible (input, output));
5084 }
5085
5086 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5087 with GNU properties if found. Otherwise, return NULL. */
5088
5089 static bfd *
5090 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5091 {
5092 struct elf_x86_init_table init_table;
5093
5094 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5095 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5096 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5097 != (int) R_X86_64_GNU_VTINHERIT)
5098 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5099 != (int) R_X86_64_GNU_VTENTRY))
5100 abort ();
5101
5102 /* This is unused for x86-64. */
5103 init_table.plt0_pad_byte = 0x90;
5104
5105 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5106 {
5107 const struct elf_backend_data *bed
5108 = get_elf_backend_data (info->output_bfd);
5109 struct elf_x86_link_hash_table *htab
5110 = elf_x86_hash_table (info, bed->target_id);
5111 if (!htab)
5112 abort ();
5113 if (htab->params->bndplt)
5114 {
5115 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5116 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5117 }
5118 else
5119 {
5120 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5121 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5122 }
5123
5124 if (ABI_64_P (info->output_bfd))
5125 {
5126 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5127 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5128 }
5129 else
5130 {
5131 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5132 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5133 }
5134 }
5135 else
5136 {
5137 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5138 init_table.non_lazy_plt = NULL;
5139 init_table.lazy_ibt_plt = NULL;
5140 init_table.non_lazy_ibt_plt = NULL;
5141 }
5142
5143 if (ABI_64_P (info->output_bfd))
5144 {
5145 init_table.r_info = elf64_r_info;
5146 init_table.r_sym = elf64_r_sym;
5147 }
5148 else
5149 {
5150 init_table.r_info = elf32_r_info;
5151 init_table.r_sym = elf32_r_sym;
5152 }
5153
5154 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5155 }
5156
5157 static const struct bfd_elf_special_section
5158 elf_x86_64_special_sections[]=
5159 {
5160 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5161 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5162 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5163 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5164 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5165 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5166 { NULL, 0, 0, 0, 0 }
5167 };
5168
5169 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5170 #define TARGET_LITTLE_NAME "elf64-x86-64"
5171 #define ELF_ARCH bfd_arch_i386
5172 #define ELF_TARGET_ID X86_64_ELF_DATA
5173 #define ELF_MACHINE_CODE EM_X86_64
5174 #if DEFAULT_LD_Z_SEPARATE_CODE
5175 # define ELF_MAXPAGESIZE 0x1000
5176 #else
5177 # define ELF_MAXPAGESIZE 0x200000
5178 #endif
5179 #define ELF_MINPAGESIZE 0x1000
5180 #define ELF_COMMONPAGESIZE 0x1000
5181
5182 #define elf_backend_can_gc_sections 1
5183 #define elf_backend_can_refcount 1
5184 #define elf_backend_want_got_plt 1
5185 #define elf_backend_plt_readonly 1
5186 #define elf_backend_want_plt_sym 0
5187 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5188 #define elf_backend_rela_normal 1
5189 #define elf_backend_plt_alignment 4
5190 #define elf_backend_extern_protected_data 1
5191 #define elf_backend_caches_rawsize 1
5192 #define elf_backend_dtrel_excludes_plt 1
5193 #define elf_backend_want_dynrelro 1
5194
5195 #define elf_info_to_howto elf_x86_64_info_to_howto
5196
5197 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5198 #define bfd_elf64_bfd_reloc_name_lookup \
5199 elf_x86_64_reloc_name_lookup
5200
5201 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5202 #define elf_backend_check_relocs elf_x86_64_check_relocs
5203 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5204 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5205 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5206 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5207 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5208 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5209 #ifdef CORE_HEADER
5210 #define elf_backend_write_core_note elf_x86_64_write_core_note
5211 #endif
5212 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5213 #define elf_backend_relocate_section elf_x86_64_relocate_section
5214 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5215 #define elf_backend_object_p elf64_x86_64_elf_object_p
5216 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5217
5218 #define elf_backend_section_from_shdr \
5219 elf_x86_64_section_from_shdr
5220
5221 #define elf_backend_section_from_bfd_section \
5222 elf_x86_64_elf_section_from_bfd_section
5223 #define elf_backend_add_symbol_hook \
5224 elf_x86_64_add_symbol_hook
5225 #define elf_backend_symbol_processing \
5226 elf_x86_64_symbol_processing
5227 #define elf_backend_common_section_index \
5228 elf_x86_64_common_section_index
5229 #define elf_backend_common_section \
5230 elf_x86_64_common_section
5231 #define elf_backend_common_definition \
5232 elf_x86_64_common_definition
5233 #define elf_backend_merge_symbol \
5234 elf_x86_64_merge_symbol
5235 #define elf_backend_special_sections \
5236 elf_x86_64_special_sections
5237 #define elf_backend_additional_program_headers \
5238 elf_x86_64_additional_program_headers
5239 #define elf_backend_setup_gnu_properties \
5240 elf_x86_64_link_setup_gnu_properties
5241 #define elf_backend_hide_symbol \
5242 _bfd_x86_elf_hide_symbol
5243
5244 #undef elf64_bed
5245 #define elf64_bed elf64_x86_64_bed
5246
5247 #include "elf64-target.h"
5248
5249 /* CloudABI support. */
5250
5251 #undef TARGET_LITTLE_SYM
5252 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5253 #undef TARGET_LITTLE_NAME
5254 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5255
5256 #undef ELF_OSABI
5257 #define ELF_OSABI ELFOSABI_CLOUDABI
5258
5259 #undef elf64_bed
5260 #define elf64_bed elf64_x86_64_cloudabi_bed
5261
5262 #include "elf64-target.h"
5263
5264 /* FreeBSD support. */
5265
5266 #undef TARGET_LITTLE_SYM
5267 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5268 #undef TARGET_LITTLE_NAME
5269 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5270
5271 #undef ELF_OSABI
5272 #define ELF_OSABI ELFOSABI_FREEBSD
5273
5274 #undef elf64_bed
5275 #define elf64_bed elf64_x86_64_fbsd_bed
5276
5277 #include "elf64-target.h"
5278
5279 /* Solaris 2 support. */
5280
5281 #undef TARGET_LITTLE_SYM
5282 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5283 #undef TARGET_LITTLE_NAME
5284 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5285
5286 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5287 {
5288 is_solaris /* os */
5289 };
5290
5291 #undef elf_backend_arch_data
5292 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5293
5294 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5295 objects won't be recognized. */
5296 #undef ELF_OSABI
5297
5298 #undef elf64_bed
5299 #define elf64_bed elf64_x86_64_sol2_bed
5300
5301 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5302 boundary. */
5303 #undef elf_backend_static_tls_alignment
5304 #define elf_backend_static_tls_alignment 16
5305
5306 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5307
5308 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5309 File, p.63. */
5310 #undef elf_backend_want_plt_sym
5311 #define elf_backend_want_plt_sym 1
5312
5313 #undef elf_backend_strtab_flags
5314 #define elf_backend_strtab_flags SHF_STRINGS
5315
5316 static bfd_boolean
5317 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5318 bfd *obfd ATTRIBUTE_UNUSED,
5319 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5320 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5321 {
5322 /* PR 19938: FIXME: Need to add code for setting the sh_info
5323 and sh_link fields of Solaris specific section types. */
5324 return FALSE;
5325 }
5326
5327 #undef elf_backend_copy_special_section_fields
5328 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5329
5330 #include "elf64-target.h"
5331
5332 /* Native Client support. */
5333
5334 static bfd_boolean
5335 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5336 {
5337 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5338 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5339 return TRUE;
5340 }
5341
5342 #undef TARGET_LITTLE_SYM
5343 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5344 #undef TARGET_LITTLE_NAME
5345 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5346 #undef elf64_bed
5347 #define elf64_bed elf64_x86_64_nacl_bed
5348
5349 #undef ELF_MAXPAGESIZE
5350 #undef ELF_MINPAGESIZE
5351 #undef ELF_COMMONPAGESIZE
5352 #define ELF_MAXPAGESIZE 0x10000
5353 #define ELF_MINPAGESIZE 0x10000
5354 #define ELF_COMMONPAGESIZE 0x10000
5355
5356 /* Restore defaults. */
5357 #undef ELF_OSABI
5358 #undef elf_backend_static_tls_alignment
5359 #undef elf_backend_want_plt_sym
5360 #define elf_backend_want_plt_sym 0
5361 #undef elf_backend_strtab_flags
5362 #undef elf_backend_copy_special_section_fields
5363
5364 /* NaCl uses substantially different PLT entries for the same effects. */
5365
5366 #undef elf_backend_plt_alignment
5367 #define elf_backend_plt_alignment 5
5368 #define NACL_PLT_ENTRY_SIZE 64
5369 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5370
5371 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5372 {
5373 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5374 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5375 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5376 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5377 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5378
5379 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5380 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5381
5382 /* 32 bytes of nop to pad out to the standard size. */
5383 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5384 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5385 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5386 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5387 0x66, /* excess data16 prefix */
5388 0x90 /* nop */
5389 };
5390
5391 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5392 {
5393 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5394 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5395 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5396 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5397
5398 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5399 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5400 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5401
5402 /* Lazy GOT entries point here (32-byte aligned). */
5403 0x68, /* pushq immediate */
5404 0, 0, 0, 0, /* replaced with index into relocation table. */
5405 0xe9, /* jmp relative */
5406 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5407
5408 /* 22 bytes of nop to pad out to the standard size. */
5409 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5410 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5411 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5412 };
5413
5414 /* .eh_frame covering the .plt section. */
5415
5416 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5417 {
5418 #if (PLT_CIE_LENGTH != 20 \
5419 || PLT_FDE_LENGTH != 36 \
5420 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5421 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5422 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5423 #endif
5424 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5425 0, 0, 0, 0, /* CIE ID */
5426 1, /* CIE version */
5427 'z', 'R', 0, /* Augmentation string */
5428 1, /* Code alignment factor */
5429 0x78, /* Data alignment factor */
5430 16, /* Return address column */
5431 1, /* Augmentation size */
5432 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5433 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5434 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5435 DW_CFA_nop, DW_CFA_nop,
5436
5437 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5438 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5439 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5440 0, 0, 0, 0, /* .plt size goes here */
5441 0, /* Augmentation size */
5442 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5443 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5444 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5445 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5446 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5447 13, /* Block length */
5448 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5449 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5450 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5451 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5452 DW_CFA_nop, DW_CFA_nop
5453 };
5454
5455 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5456 {
5457 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5458 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5459 elf_x86_64_nacl_plt_entry, /* plt_entry */
5460 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5461 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5462 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5463 2, /* plt_tlsdesc_got1_offset */
5464 9, /* plt_tlsdesc_got2_offset */
5465 6, /* plt_tlsdesc_got1_insn_end */
5466 13, /* plt_tlsdesc_got2_insn_end */
5467 2, /* plt0_got1_offset */
5468 9, /* plt0_got2_offset */
5469 13, /* plt0_got2_insn_end */
5470 3, /* plt_got_offset */
5471 33, /* plt_reloc_offset */
5472 38, /* plt_plt_offset */
5473 7, /* plt_got_insn_size */
5474 42, /* plt_plt_insn_end */
5475 32, /* plt_lazy_offset */
5476 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5477 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5478 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5479 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5480 };
5481
5482 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5483 {
5484 is_nacl /* os */
5485 };
5486
5487 #undef elf_backend_arch_data
5488 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5489
5490 #undef elf_backend_object_p
5491 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5492 #undef elf_backend_modify_segment_map
5493 #define elf_backend_modify_segment_map nacl_modify_segment_map
5494 #undef elf_backend_modify_headers
5495 #define elf_backend_modify_headers nacl_modify_headers
5496 #undef elf_backend_final_write_processing
5497 #define elf_backend_final_write_processing nacl_final_write_processing
5498
5499 #include "elf64-target.h"
5500
5501 /* Native Client x32 support. */
5502
5503 static bfd_boolean
5504 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5505 {
5506 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5507 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5508 return TRUE;
5509 }
5510
5511 #undef TARGET_LITTLE_SYM
5512 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5513 #undef TARGET_LITTLE_NAME
5514 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5515 #undef elf32_bed
5516 #define elf32_bed elf32_x86_64_nacl_bed
5517
5518 #define bfd_elf32_bfd_reloc_type_lookup \
5519 elf_x86_64_reloc_type_lookup
5520 #define bfd_elf32_bfd_reloc_name_lookup \
5521 elf_x86_64_reloc_name_lookup
5522 #define bfd_elf32_get_synthetic_symtab \
5523 elf_x86_64_get_synthetic_symtab
5524
5525 #undef elf_backend_object_p
5526 #define elf_backend_object_p \
5527 elf32_x86_64_nacl_elf_object_p
5528
5529 #undef elf_backend_bfd_from_remote_memory
5530 #define elf_backend_bfd_from_remote_memory \
5531 _bfd_elf32_bfd_from_remote_memory
5532
5533 #undef elf_backend_size_info
5534 #define elf_backend_size_info \
5535 _bfd_elf32_size_info
5536
5537 #undef elf32_bed
5538 #define elf32_bed elf32_x86_64_bed
5539
5540 #include "elf32-target.h"
5541
5542 /* Restore defaults. */
5543 #undef elf_backend_object_p
5544 #define elf_backend_object_p elf64_x86_64_elf_object_p
5545 #undef elf_backend_bfd_from_remote_memory
5546 #undef elf_backend_size_info
5547 #undef elf_backend_modify_segment_map
5548 #undef elf_backend_modify_headers
5549 #undef elf_backend_final_write_processing
5550
5551 /* Intel L1OM support. */
5552
5553 static bfd_boolean
5554 elf64_l1om_elf_object_p (bfd *abfd)
5555 {
5556 /* Set the right machine number for an L1OM elf64 file. */
5557 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5558 return TRUE;
5559 }
5560
5561 #undef TARGET_LITTLE_SYM
5562 #define TARGET_LITTLE_SYM l1om_elf64_vec
5563 #undef TARGET_LITTLE_NAME
5564 #define TARGET_LITTLE_NAME "elf64-l1om"
5565 #undef ELF_ARCH
5566 #define ELF_ARCH bfd_arch_l1om
5567
5568 #undef ELF_MACHINE_CODE
5569 #define ELF_MACHINE_CODE EM_L1OM
5570
5571 #undef ELF_OSABI
5572
5573 #undef elf64_bed
5574 #define elf64_bed elf64_l1om_bed
5575
5576 #undef elf_backend_object_p
5577 #define elf_backend_object_p elf64_l1om_elf_object_p
5578
5579 /* Restore defaults. */
5580 #undef ELF_MAXPAGESIZE
5581 #undef ELF_MINPAGESIZE
5582 #undef ELF_COMMONPAGESIZE
5583 #if DEFAULT_LD_Z_SEPARATE_CODE
5584 # define ELF_MAXPAGESIZE 0x1000
5585 #else
5586 # define ELF_MAXPAGESIZE 0x200000
5587 #endif
5588 #define ELF_MINPAGESIZE 0x1000
5589 #define ELF_COMMONPAGESIZE 0x1000
5590 #undef elf_backend_plt_alignment
5591 #define elf_backend_plt_alignment 4
5592 #undef elf_backend_arch_data
5593 #define elf_backend_arch_data &elf_x86_64_arch_bed
5594
5595 #include "elf64-target.h"
5596
5597 /* FreeBSD L1OM support. */
5598
5599 #undef TARGET_LITTLE_SYM
5600 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5601 #undef TARGET_LITTLE_NAME
5602 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5603
5604 #undef ELF_OSABI
5605 #define ELF_OSABI ELFOSABI_FREEBSD
5606
5607 #undef elf64_bed
5608 #define elf64_bed elf64_l1om_fbsd_bed
5609
5610 #include "elf64-target.h"
5611
5612 /* Intel K1OM support. */
5613
5614 static bfd_boolean
5615 elf64_k1om_elf_object_p (bfd *abfd)
5616 {
5617 /* Set the right machine number for an K1OM elf64 file. */
5618 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5619 return TRUE;
5620 }
5621
5622 #undef TARGET_LITTLE_SYM
5623 #define TARGET_LITTLE_SYM k1om_elf64_vec
5624 #undef TARGET_LITTLE_NAME
5625 #define TARGET_LITTLE_NAME "elf64-k1om"
5626 #undef ELF_ARCH
5627 #define ELF_ARCH bfd_arch_k1om
5628
5629 #undef ELF_MACHINE_CODE
5630 #define ELF_MACHINE_CODE EM_K1OM
5631
5632 #undef ELF_OSABI
5633
5634 #undef elf64_bed
5635 #define elf64_bed elf64_k1om_bed
5636
5637 #undef elf_backend_object_p
5638 #define elf_backend_object_p elf64_k1om_elf_object_p
5639
5640 #undef elf_backend_static_tls_alignment
5641
5642 #undef elf_backend_want_plt_sym
5643 #define elf_backend_want_plt_sym 0
5644
5645 #include "elf64-target.h"
5646
5647 /* FreeBSD K1OM support. */
5648
5649 #undef TARGET_LITTLE_SYM
5650 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5651 #undef TARGET_LITTLE_NAME
5652 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5653
5654 #undef ELF_OSABI
5655 #define ELF_OSABI ELFOSABI_FREEBSD
5656
5657 #undef elf64_bed
5658 #define elf64_bed elf64_k1om_fbsd_bed
5659
5660 #include "elf64-target.h"
5661
5662 /* 32bit x86-64 support. */
5663
5664 #undef TARGET_LITTLE_SYM
5665 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5666 #undef TARGET_LITTLE_NAME
5667 #define TARGET_LITTLE_NAME "elf32-x86-64"
5668 #undef elf32_bed
5669
5670 #undef ELF_ARCH
5671 #define ELF_ARCH bfd_arch_i386
5672
5673 #undef ELF_MACHINE_CODE
5674 #define ELF_MACHINE_CODE EM_X86_64
5675
5676 #undef ELF_OSABI
5677
5678 #undef elf_backend_object_p
5679 #define elf_backend_object_p \
5680 elf32_x86_64_elf_object_p
5681
5682 #undef elf_backend_bfd_from_remote_memory
5683 #define elf_backend_bfd_from_remote_memory \
5684 _bfd_elf32_bfd_from_remote_memory
5685
5686 #undef elf_backend_size_info
5687 #define elf_backend_size_info \
5688 _bfd_elf32_size_info
5689
5690 #include "elf32-target.h"
This page took 0.291273 seconds and 5 git commands to generate.