1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
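/* Editor's note, added for illustration (not part of the original
   source): because every x86-64 relocation type value fits in the bits
   shared by both layouts, the 32-bit macro recovers the same type from
   an x32 or an LP64 r_info, e.g.

     unsigned int r_type = ELF32_R_TYPE (rel->r_info);

   which is the form used throughout this file (see
   elf_x86_64_info_to_howto below).  */
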
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
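
/* Editorial sketch (added for illustration, not in the original
   source): with the two definitions above, elf_x86_64_rtype_to_howto
   maps a vtable relocation back into this table with

     i = r_type - (unsigned int) R_X86_64_vt_offset;

   so the R_X86_64_GNU_VT* entries below land right after the standard
   ones.  */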
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
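/* Editorial note (illustrative, not part of the original source): the
   bit above is ORed into the relocation type once a GOTPCREL load has
   been rewritten, as in elf_x86_64_convert_load_reloc below:

     irel->r_info = htab->r_info (r_symndx,
				  r_type | R_X86_64_converted_reloc_bit);

   and it is masked off again with ~R_X86_64_converted_reloc_bit before
   the type is compared or looked up.  */
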
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix, so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
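
/* Editorial aside (an interpretation, not from the original source):
   the DW_CFA_def_cfa_expression above computes

     CFA = (%rsp + 8) + ((((%rip & 15) >= 11) ? 1 : 0) << 3)

   i.e. the CFA is assumed to be 8 bytes further once the address
   within a 16-byte lazy PLT slot has passed the "pushq" of the
   relocation index; the BND and IBT variants below differ only in the
   DW_OP_lit threshold.  */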
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports a transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transition to a different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transition to a different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq %r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq %rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transition to a different access model. For largepic,
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq %r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq %rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = NULL;
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = NULL;
1436 }
1437
1438 if (bfd_link_dll (info))
1439 {
1440 object = _("a shared object");
1441 if (!pic)
1442 pic = _("; recompile with -fPIC");
1443 }
1444 else
1445 {
1446 if (bfd_link_pie (info))
1447 object = _("a PIE object");
1448 else
1449 object = _("a PDE object");
1450 if (!pic)
1451 pic = _("; recompile with -fPIE");
1452 }
1453
1454 /* xgettext:c-format */
1455 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1456 "not be used when making %s%s"),
1457 input_bfd, howto->name, und, v, name,
1458 object, pic);
1459 bfd_set_error (bfd_error_bad_value);
1460 sec->check_relocs_failed = 1;
1461 return FALSE;
1462 }
1463
1464 /* With the local symbol, foo, we convert
1465 mov foo@GOTPCREL(%rip), %reg
1466 to
1467 lea foo(%rip), %reg
1468 and convert
1469 call/jmp *foo@GOTPCREL(%rip)
1470 to
1471 nop call foo (for call) or jmp foo nop (for jmp)
1472 When PIC is false, convert
1473 test %reg, foo@GOTPCREL(%rip)
1474 to
1475 test $foo, %reg
1476 and convert
1477 binop foo@GOTPCREL(%rip), %reg
1478 to
1479 binop $foo, %reg
1480 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1481 instructions. */
1482
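/* Editor's sketch of the simplest case described above (illustrative
   byte encodings, not taken from this file):

     48 8b 05 00 00 00 00	mov  foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 00 00 00 00	lea  foo(%rip), %rax

   i.e. only the opcode byte changes (0x8b -> 0x8d) and the relocation
   is rewritten to R_X86_64_PC32; the 32-bit displacement is still
   supplied by the relocation.  */
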
1483 static bfd_boolean
1484 elf_x86_64_convert_load_reloc (bfd *abfd,
1485 bfd_byte *contents,
1486 unsigned int *r_type_p,
1487 Elf_Internal_Rela *irel,
1488 struct elf_link_hash_entry *h,
1489 bfd_boolean *converted,
1490 struct bfd_link_info *link_info)
1491 {
1492 struct elf_x86_link_hash_table *htab;
1493 bfd_boolean is_pic;
1494 bfd_boolean no_overflow;
1495 bfd_boolean relocx;
1496 bfd_boolean to_reloc_pc32;
1497 asection *tsec;
1498 bfd_signed_vma raddend;
1499 unsigned int opcode;
1500 unsigned int modrm;
1501 unsigned int r_type = *r_type_p;
1502 unsigned int r_symndx;
1503 bfd_vma roff = irel->r_offset;
1504
1505 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1506 return TRUE;
1507
1508 raddend = irel->r_addend;
1509 /* Addend for 32-bit PC-relative relocation must be -4. */
1510 if (raddend != -4)
1511 return TRUE;
1512
1513 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1514 is_pic = bfd_link_pic (link_info);
1515
1516 relocx = (r_type == R_X86_64_GOTPCRELX
1517 || r_type == R_X86_64_REX_GOTPCRELX);
1518
1519 /* TRUE if --no-relax is used. */
1520 no_overflow = link_info->disable_target_specific_optimizations > 1;
1521
1522 r_symndx = htab->r_sym (irel->r_info);
1523
1524 opcode = bfd_get_8 (abfd, contents + roff - 2);
1525
1526 /* "mov" may be converted to "lea" even for plain R_X86_64_GOTPCREL,
since that conversion has been done for a long time. */
1527 if (opcode != 0x8b)
1528 {
1529 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1530 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1531 test, xor instructions. */
1532 if (!relocx)
1533 return TRUE;
1534 }
1535
1536 /* We convert only to R_X86_64_PC32:
1537 1. Branch.
1538 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1539 3. no_overflow is true.
1540 4. PIC.
1541 */
1542 to_reloc_pc32 = (opcode == 0xff
1543 || !relocx
1544 || no_overflow
1545 || is_pic);
1546
1547 /* Get the symbol referred to by the reloc. */
1548 if (h == NULL)
1549 {
1550 Elf_Internal_Sym *isym
1551 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1552
1553 /* Skip relocation against undefined symbols. */
1554 if (isym->st_shndx == SHN_UNDEF)
1555 return TRUE;
1556
1557 if (isym->st_shndx == SHN_ABS)
1558 tsec = bfd_abs_section_ptr;
1559 else if (isym->st_shndx == SHN_COMMON)
1560 tsec = bfd_com_section_ptr;
1561 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1562 tsec = &_bfd_elf_large_com_section;
1563 else
1564 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1565 }
1566 else
1567 {
1568 /* Undefined weak symbol is only bound locally in executable
1569 and its reference is resolved as 0 without relocation
1570 overflow. We can only perform this optimization for
1571 GOTPCRELX relocations since we need to modify REX byte.
1572 It is OK to convert mov with R_X86_64_GOTPCREL to
1573 R_X86_64_PC32. */
1574 bfd_boolean local_ref;
1575 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1576
1577 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1578 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1579 if ((relocx || opcode == 0x8b)
1580 && (h->root.type == bfd_link_hash_undefweak
1581 && !eh->linker_def
1582 && local_ref))
1583 {
1584 if (opcode == 0xff)
1585 {
1586 /* Skip for branch instructions since R_X86_64_PC32
1587 may overflow. */
1588 if (no_overflow)
1589 return TRUE;
1590 }
1591 else if (relocx)
1592 {
1593 /* For non-branch instructions, we can convert to
1594 R_X86_64_32/R_X86_64_32S since we know if there
1595 is a REX byte. */
1596 to_reloc_pc32 = FALSE;
1597 }
1598
1599 /* Since we don't know the current PC when PIC is true,
1600 we can't convert to R_X86_64_PC32. */
1601 if (to_reloc_pc32 && is_pic)
1602 return TRUE;
1603
1604 goto convert;
1605 }
1606 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1607 ld.so may use its link-time address. */
1608 else if (h->start_stop
1609 || eh->linker_def
1610 || ((h->def_regular
1611 || h->root.type == bfd_link_hash_defined
1612 || h->root.type == bfd_link_hash_defweak)
1613 && h != htab->elf.hdynamic
1614 && local_ref))
1615 {
1616 /* bfd_link_hash_new or bfd_link_hash_undefined is
1617 set by an assignment in a linker script in
1618 bfd_elf_record_link_assignment. start_stop is set
1619 on __start_SECNAME/__stop_SECNAME which mark section
1620 SECNAME. */
1621 if (h->start_stop
1622 || eh->linker_def
1623 || (h->def_regular
1624 && (h->root.type == bfd_link_hash_new
1625 || h->root.type == bfd_link_hash_undefined
1626 || ((h->root.type == bfd_link_hash_defined
1627 || h->root.type == bfd_link_hash_defweak)
1628 && h->root.u.def.section == bfd_und_section_ptr))))
1629 {
1630 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1631 if (no_overflow)
1632 return TRUE;
1633 goto convert;
1634 }
1635 tsec = h->root.u.def.section;
1636 }
1637 else
1638 return TRUE;
1639 }
1640
1641 /* Don't convert GOTPCREL relocation against large section. */
1642 if (elf_section_data (tsec) != NULL
1643 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1644 return TRUE;
1645
1646 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1647 if (no_overflow)
1648 return TRUE;
1649
1650 convert:
1651 if (opcode == 0xff)
1652 {
1653 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1654 unsigned int nop;
1655 unsigned int disp;
1656 bfd_vma nop_offset;
1657
1658 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1659 R_X86_64_PC32. */
1660 modrm = bfd_get_8 (abfd, contents + roff - 1);
1661 if (modrm == 0x25)
1662 {
1663 /* Convert to "jmp foo nop". */
1664 modrm = 0xe9;
1665 nop = NOP_OPCODE;
1666 nop_offset = irel->r_offset + 3;
1667 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1668 irel->r_offset -= 1;
1669 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1670 }
1671 else
1672 {
1673 struct elf_x86_link_hash_entry *eh
1674 = (struct elf_x86_link_hash_entry *) h;
1675
1676 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1677 is a nop prefix. */
1678 modrm = 0xe8;
1679 /* To support TLS optimization, always use addr32 prefix for
1680 "call *__tls_get_addr@GOTPCREL(%rip)". */
1681 if (eh && eh->tls_get_addr)
1682 {
1683 nop = 0x67;
1684 nop_offset = irel->r_offset - 2;
1685 }
1686 else
1687 {
1688 nop = htab->params->call_nop_byte;
1689 if (htab->params->call_nop_as_suffix)
1690 {
1691 nop_offset = irel->r_offset + 3;
1692 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1693 irel->r_offset -= 1;
1694 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1695 }
1696 else
1697 nop_offset = irel->r_offset - 2;
1698 }
1699 }
1700 bfd_put_8 (abfd, nop, contents + nop_offset);
1701 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1702 r_type = R_X86_64_PC32;
1703 }
1704 else
1705 {
1706 unsigned int rex;
1707 unsigned int rex_mask = REX_R;
1708
1709 if (r_type == R_X86_64_REX_GOTPCRELX)
1710 rex = bfd_get_8 (abfd, contents + roff - 3);
1711 else
1712 rex = 0;
1713
1714 if (opcode == 0x8b)
1715 {
1716 if (to_reloc_pc32)
1717 {
1718 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1719 "lea foo(%rip), %reg". */
1720 opcode = 0x8d;
1721 r_type = R_X86_64_PC32;
1722 }
1723 else
1724 {
1725 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1726 "mov $foo, %reg". */
1727 opcode = 0xc7;
1728 modrm = bfd_get_8 (abfd, contents + roff - 1);
1729 modrm = 0xc0 | (modrm & 0x38) >> 3;
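/* The original ModRM byte has mod=00, r/m=101 (RIP-relative) with
   the destination register in the reg field; the expression above
   builds the new ModRM with mod=11 and that register moved into
   the r/m field, as the mov-immediate encoding requires. */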
1730 if ((rex & REX_W) != 0
1731 && ABI_64_P (link_info->output_bfd))
1732 {
1733 /* Keep the REX_W bit in REX byte for LP64. */
1734 r_type = R_X86_64_32S;
1735 goto rewrite_modrm_rex;
1736 }
1737 else
1738 {
1739 /* If the REX_W bit in REX byte isn't needed,
1740 use R_X86_64_32 and clear the W bit to avoid
1741 sign-extend imm32 to imm64. */
1742 r_type = R_X86_64_32;
1743 /* Clear the W bit in REX byte. */
1744 rex_mask |= REX_W;
1745 goto rewrite_modrm_rex;
1746 }
1747 }
1748 }
1749 else
1750 {
1751 /* R_X86_64_PC32 isn't supported. */
1752 if (to_reloc_pc32)
1753 return TRUE;
1754
1755 modrm = bfd_get_8 (abfd, contents + roff - 1);
1756 if (opcode == 0x85)
1757 {
1758 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1759 "test $foo, %reg". */
1760 modrm = 0xc0 | (modrm & 0x38) >> 3;
1761 opcode = 0xf7;
1762 }
1763 else
1764 {
1765 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1766 "binop $foo, %reg". */
1767 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1768 opcode = 0x81;
1769 }
1770
1771 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1772 overflow when sign-extending imm32 to imm64. */
1773 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1774
1775 rewrite_modrm_rex:
1776 bfd_put_8 (abfd, modrm, contents + roff - 1);
1777
1778 if (rex)
1779 {
1780 /* Move the R bit to the B bit in REX byte. */
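/* The destination register was encoded in the ModRM reg field
   (extended by REX.R); after the rewrite it lives in the r/m
   field (extended by REX.B), so the R bit is shifted down two
   positions into the B bit. */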
1781 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1782 bfd_put_8 (abfd, rex, contents + roff - 3);
1783 }
1784
1785 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1786 irel->r_addend = 0;
1787 }
1788
1789 bfd_put_8 (abfd, opcode, contents + roff - 2);
1790 }
1791
1792 *r_type_p = r_type;
1793 irel->r_info = htab->r_info (r_symndx,
1794 r_type | R_X86_64_converted_reloc_bit);
1795
1796 *converted = TRUE;
1797
1798 return TRUE;
1799 }
1800
1801 /* Look through the relocs for a section during the first phase, and
1802 calculate needed space in the global offset table, procedure
1803 linkage table, and dynamic reloc sections. */
1804
1805 static bfd_boolean
1806 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1807 asection *sec,
1808 const Elf_Internal_Rela *relocs)
1809 {
1810 struct elf_x86_link_hash_table *htab;
1811 Elf_Internal_Shdr *symtab_hdr;
1812 struct elf_link_hash_entry **sym_hashes;
1813 const Elf_Internal_Rela *rel;
1814 const Elf_Internal_Rela *rel_end;
1815 asection *sreloc;
1816 bfd_byte *contents;
1817 bfd_boolean converted;
1818
1819 if (bfd_link_relocatable (info))
1820 return TRUE;
1821
1822 /* Don't do anything special with non-loaded, non-alloced sections.
1823 In particular, any relocs in such sections should not affect GOT
1824 and PLT reference counting (ie. we don't allow them to create GOT
1825 or PLT entries), there's no possibility or desire to optimize TLS
1826 relocs, and there's not much point in propagating relocs to shared
1827 libs that the dynamic linker won't relocate. */
1828 if ((sec->flags & SEC_ALLOC) == 0)
1829 return TRUE;
1830
1831 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1832 if (htab == NULL)
1833 {
1834 sec->check_relocs_failed = 1;
1835 return FALSE;
1836 }
1837
1838 BFD_ASSERT (is_x86_elf (abfd, htab));
1839
1840 /* Get the section contents. */
1841 if (elf_section_data (sec)->this_hdr.contents != NULL)
1842 contents = elf_section_data (sec)->this_hdr.contents;
1843 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1844 {
1845 sec->check_relocs_failed = 1;
1846 return FALSE;
1847 }
1848
1849 symtab_hdr = &elf_symtab_hdr (abfd);
1850 sym_hashes = elf_sym_hashes (abfd);
1851
1852 converted = FALSE;
1853
1854 sreloc = NULL;
1855
1856 rel_end = relocs + sec->reloc_count;
1857 for (rel = relocs; rel < rel_end; rel++)
1858 {
1859 unsigned int r_type;
1860 unsigned int r_symndx;
1861 struct elf_link_hash_entry *h;
1862 struct elf_x86_link_hash_entry *eh;
1863 Elf_Internal_Sym *isym;
1864 const char *name;
1865 bfd_boolean size_reloc;
1866 bfd_boolean converted_reloc;
1867 bfd_boolean do_check_pic;
1868
1869 r_symndx = htab->r_sym (rel->r_info);
1870 r_type = ELF32_R_TYPE (rel->r_info);
1871
1872 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1873 {
1874 /* xgettext:c-format */
1875 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1876 abfd, r_symndx);
1877 goto error_return;
1878 }
1879
1880 if (r_symndx < symtab_hdr->sh_info)
1881 {
1882 /* A local symbol. */
1883 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1884 abfd, r_symndx);
1885 if (isym == NULL)
1886 goto error_return;
1887
1888 /* Check relocation against local STT_GNU_IFUNC symbol. */
1889 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1890 {
1891 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1892 TRUE);
1893 if (h == NULL)
1894 goto error_return;
1895
1896 /* Fake a STT_GNU_IFUNC symbol. */
1897 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1898 isym, NULL);
1899 h->type = STT_GNU_IFUNC;
1900 h->def_regular = 1;
1901 h->ref_regular = 1;
1902 h->forced_local = 1;
1903 h->root.type = bfd_link_hash_defined;
1904 }
1905 else
1906 h = NULL;
1907 }
1908 else
1909 {
1910 isym = NULL;
1911 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1912 while (h->root.type == bfd_link_hash_indirect
1913 || h->root.type == bfd_link_hash_warning)
1914 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1915 }
1916
1917 /* Check invalid x32 relocations. */
1918 if (!ABI_64_P (abfd))
1919 switch (r_type)
1920 {
1921 default:
1922 break;
1923
1924 case R_X86_64_DTPOFF64:
1925 case R_X86_64_TPOFF64:
1926 case R_X86_64_PC64:
1927 case R_X86_64_GOTOFF64:
1928 case R_X86_64_GOT64:
1929 case R_X86_64_GOTPCREL64:
1930 case R_X86_64_GOTPC64:
1931 case R_X86_64_GOTPLT64:
1932 case R_X86_64_PLTOFF64:
1933 {
1934 if (h)
1935 name = h->root.root.string;
1936 else
1937 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1938 NULL);
1939 _bfd_error_handler
1940 /* xgettext:c-format */
1941 (_("%pB: relocation %s against symbol `%s' isn't "
1942 "supported in x32 mode"), abfd,
1943 x86_64_elf_howto_table[r_type].name, name);
1944 bfd_set_error (bfd_error_bad_value);
1945 goto error_return;
1946 }
1947 break;
1948 }
1949
1950 if (h != NULL)
1951 {
1952 /* It is referenced by a non-shared object. */
1953 h->ref_regular = 1;
1954
1955 if (h->type == STT_GNU_IFUNC)
1956 elf_tdata (info->output_bfd)->has_gnu_symbols
1957 |= elf_gnu_symbol_ifunc;
1958 }
1959
1960 converted_reloc = FALSE;
1961 if ((r_type == R_X86_64_GOTPCREL
1962 || r_type == R_X86_64_GOTPCRELX
1963 || r_type == R_X86_64_REX_GOTPCRELX)
1964 && (h == NULL || h->type != STT_GNU_IFUNC))
1965 {
1966 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1967 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1968 irel, h, &converted_reloc,
1969 info))
1970 goto error_return;
1971
1972 if (converted_reloc)
1973 converted = TRUE;
1974 }
1975
1976 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1977 symtab_hdr, sym_hashes,
1978 &r_type, GOT_UNKNOWN,
1979 rel, rel_end, h, r_symndx, FALSE))
1980 goto error_return;
1981
1982 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1983 if (h == htab->elf.hgot)
1984 htab->got_referenced = TRUE;
1985
1986 eh = (struct elf_x86_link_hash_entry *) h;
1987 switch (r_type)
1988 {
1989 case R_X86_64_TLSLD:
1990 htab->tls_ld_or_ldm_got.refcount = 1;
1991 goto create_got;
1992
1993 case R_X86_64_TPOFF32:
1994 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1995 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1996 &x86_64_elf_howto_table[r_type]);
1997 if (eh != NULL)
1998 eh->zero_undefweak &= 0x2;
1999 break;
2000
2001 case R_X86_64_GOTTPOFF:
2002 if (!bfd_link_executable (info))
2003 info->flags |= DF_STATIC_TLS;
2004 /* Fall through */
2005
2006 case R_X86_64_GOT32:
2007 case R_X86_64_GOTPCREL:
2008 case R_X86_64_GOTPCRELX:
2009 case R_X86_64_REX_GOTPCRELX:
2010 case R_X86_64_TLSGD:
2011 case R_X86_64_GOT64:
2012 case R_X86_64_GOTPCREL64:
2013 case R_X86_64_GOTPLT64:
2014 case R_X86_64_GOTPC32_TLSDESC:
2015 case R_X86_64_TLSDESC_CALL:
2016 /* This symbol requires a global offset table entry. */
2017 {
2018 int tls_type, old_tls_type;
2019
2020 switch (r_type)
2021 {
2022 default: tls_type = GOT_NORMAL; break;
2023 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2024 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2025 case R_X86_64_GOTPC32_TLSDESC:
2026 case R_X86_64_TLSDESC_CALL:
2027 tls_type = GOT_TLS_GDESC; break;
2028 }
2029
2030 if (h != NULL)
2031 {
2032 h->got.refcount = 1;
2033 old_tls_type = eh->tls_type;
2034 }
2035 else
2036 {
2037 bfd_signed_vma *local_got_refcounts;
2038
2039 /* This is a global offset table entry for a local symbol. */
2040 local_got_refcounts = elf_local_got_refcounts (abfd);
2041 if (local_got_refcounts == NULL)
2042 {
2043 bfd_size_type size;
2044
2045 size = symtab_hdr->sh_info;
2046 size *= sizeof (bfd_signed_vma)
2047 + sizeof (bfd_vma) + sizeof (char);
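/* This single allocation holds three consecutive arrays of
   sh_info elements each: GOT reference counts (bfd_signed_vma),
   TLSDESC GOT offsets (bfd_vma) and TLS type bytes (char); the
   pointers set up below index into it. */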
2048 local_got_refcounts = ((bfd_signed_vma *)
2049 bfd_zalloc (abfd, size));
2050 if (local_got_refcounts == NULL)
2051 goto error_return;
2052 elf_local_got_refcounts (abfd) = local_got_refcounts;
2053 elf_x86_local_tlsdesc_gotent (abfd)
2054 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2055 elf_x86_local_got_tls_type (abfd)
2056 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2057 }
2058 local_got_refcounts[r_symndx] = 1;
2059 old_tls_type
2060 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2061 }
2062
2063 /* If a TLS symbol is accessed using IE at least once,
2064 there is no point to use dynamic model for it. */
2065 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2066 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2067 || tls_type != GOT_TLS_IE))
2068 {
2069 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2070 tls_type = old_tls_type;
2071 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2072 && GOT_TLS_GD_ANY_P (tls_type))
2073 tls_type |= old_tls_type;
2074 else
2075 {
2076 if (h)
2077 name = h->root.root.string;
2078 else
2079 name = bfd_elf_sym_name (abfd, symtab_hdr,
2080 isym, NULL);
2081 _bfd_error_handler
2082 /* xgettext:c-format */
2083 (_("%pB: '%s' accessed both as normal and"
2084 " thread local symbol"),
2085 abfd, name);
2086 bfd_set_error (bfd_error_bad_value);
2087 goto error_return;
2088 }
2089 }
2090
2091 if (old_tls_type != tls_type)
2092 {
2093 if (eh != NULL)
2094 eh->tls_type = tls_type;
2095 else
2096 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2097 }
2098 }
2099 /* Fall through */
2100
2101 case R_X86_64_GOTOFF64:
2102 case R_X86_64_GOTPC32:
2103 case R_X86_64_GOTPC64:
2104 create_got:
2105 if (eh != NULL)
2106 eh->zero_undefweak &= 0x2;
2107 break;
2108
2109 case R_X86_64_PLT32:
2110 case R_X86_64_PLT32_BND:
2111 /* This symbol requires a procedure linkage table entry. We
2112 actually build the entry in adjust_dynamic_symbol,
2113 because this might be a case of linking PIC code which is
2114 never referenced by a dynamic object, in which case we
2115 don't need to generate a procedure linkage table entry
2116 after all. */
2117
2118 /* If this is a local symbol, we resolve it directly without
2119 creating a procedure linkage table entry. */
2120 if (h == NULL)
2121 continue;
2122
2123 eh->zero_undefweak &= 0x2;
2124 h->needs_plt = 1;
2125 h->plt.refcount = 1;
2126 break;
2127
2128 case R_X86_64_PLTOFF64:
2129 /* This tries to form the 'address' of a function relative
2130 to GOT. For global symbols we need a PLT entry. */
2131 if (h != NULL)
2132 {
2133 h->needs_plt = 1;
2134 h->plt.refcount = 1;
2135 }
2136 goto create_got;
2137
2138 case R_X86_64_SIZE32:
2139 case R_X86_64_SIZE64:
2140 size_reloc = TRUE;
2141 goto do_size;
2142
2143 case R_X86_64_PC8:
2144 case R_X86_64_PC16:
2145 case R_X86_64_PC32:
2146 case R_X86_64_PC32_BND:
2147 do_check_pic = TRUE;
2148 goto check_pic;
2149
2150 case R_X86_64_32:
2151 if (!ABI_64_P (abfd))
2152 goto pointer;
2153 /* Fall through. */
2154 case R_X86_64_8:
2155 case R_X86_64_16:
2156 case R_X86_64_32S:
2157 /* Check relocation overflow as these relocs may lead to
2158 run-time relocation overflow. Don't error out for
2159 sections we don't care about, such as debug sections or
2160 when relocation overflow check is disabled. */
2161 if (!htab->params->no_reloc_overflow_check
2162 && !converted_reloc
2163 && (bfd_link_pic (info)
2164 || (bfd_link_executable (info)
2165 && h != NULL
2166 && !h->def_regular
2167 && h->def_dynamic
2168 && (sec->flags & SEC_READONLY) == 0)))
2169 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2170 &x86_64_elf_howto_table[r_type]);
2171 /* Fall through. */
2172
2173 case R_X86_64_PC64:
2174 case R_X86_64_64:
2175 pointer:
2176 do_check_pic = FALSE;
2177 check_pic:
2178 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2179 eh->zero_undefweak |= 0x2;
2180 /* We are called after all symbols have been resolved. Only
2181 relocations against STT_GNU_IFUNC symbols must go through
2182 the PLT. */
2183 if (h != NULL
2184 && (bfd_link_executable (info)
2185 || h->type == STT_GNU_IFUNC))
2186 {
2187 bfd_boolean func_pointer_ref = FALSE;
2188
2189 if (r_type == R_X86_64_PC32)
2190 {
2191 /* Since something like ".long foo - ." may be used
2192 as a pointer, make sure that the PLT is used if foo is
2193 a function defined in a shared library. */
2194 if ((sec->flags & SEC_CODE) == 0)
2195 {
2196 h->pointer_equality_needed = 1;
2197 if (bfd_link_pie (info)
2198 && h->type == STT_FUNC
2199 && !h->def_regular
2200 && h->def_dynamic)
2201 {
2202 h->needs_plt = 1;
2203 h->plt.refcount = 1;
2204 }
2205 }
2206 }
2207 else if (r_type != R_X86_64_PC32_BND
2208 && r_type != R_X86_64_PC64)
2209 {
2210 h->pointer_equality_needed = 1;
2211 /* At run-time, R_X86_64_64 can be resolved for both
2212 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2213 can only be resolved for x32. */
2214 if ((sec->flags & SEC_READONLY) == 0
2215 && (r_type == R_X86_64_64
2216 || (!ABI_64_P (abfd)
2217 && (r_type == R_X86_64_32
2218 || r_type == R_X86_64_32S))))
2219 func_pointer_ref = TRUE;
2220 }
2221
2222 if (!func_pointer_ref)
2223 {
2224 /* If this reloc is in a read-only section, we might
2225 need a copy reloc. We can't check reliably at this
2226 stage whether the section is read-only, as input
2227 sections have not yet been mapped to output sections.
2228 Tentatively set the flag for now, and correct in
2229 adjust_dynamic_symbol. */
2230 h->non_got_ref = 1;
2231
2232 /* We may need a .plt entry if the symbol is a function
2233 defined in a shared lib or is a function referenced
2234 from the code or read-only section. */
2235 if (!h->def_regular
2236 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2237 h->plt.refcount = 1;
2238 }
2239 }
2240
2241 if (do_check_pic)
2242 {
2243 /* Don't complain about -fPIC if the symbol is undefined
2244 when building an executable, unless it is an unresolved weak
2245 symbol, references a dynamic definition in a PIE, or
2246 -z nocopyreloc is used. */
2247 bfd_boolean no_copyreloc_p
2248 = (info->nocopyreloc
2249 || (h != NULL
2250 && !h->root.linker_def
2251 && !h->root.ldscript_def
2252 && eh->def_protected
2253 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
2254 if ((sec->flags & SEC_ALLOC) != 0
2255 && (sec->flags & SEC_READONLY) != 0
2256 && h != NULL
2257 && ((bfd_link_executable (info)
2258 && ((h->root.type == bfd_link_hash_undefweak
2259 && (eh == NULL
2260 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2261 eh)))
2262 || (bfd_link_pie (info)
2263 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2264 && h->def_dynamic)
2265 || (no_copyreloc_p
2266 && h->def_dynamic
2267 && !(h->root.u.def.section->flags & SEC_CODE))))
2268 || bfd_link_dll (info)))
2269 {
2270 bfd_boolean fail = FALSE;
2271 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
2272 {
2273 /* Symbol is referenced locally. Make sure it is
2274 defined locally. */
2275 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
2276 }
2277 else if (bfd_link_pie (info))
2278 {
2279 /* We can only use PC-relative relocations in PIE
2280 from non-code sections. */
2281 if (h->type == STT_FUNC
2282 && (sec->flags & SEC_CODE) != 0)
2283 fail = TRUE;
2284 }
2285 else if (no_copyreloc_p || bfd_link_dll (info))
2286 {
2287 /* Symbol doesn't need copy reloc and isn't
2288 referenced locally. Don't allow PC-relative
2289 relocations against default and protected
2290 symbols since address of protected function
2291 and location of protected data may not be in
2292 the shared object. */
2293 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2294 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
2295 }
2296
2297 if (fail)
2298 return elf_x86_64_need_pic (info, abfd, sec, h,
2299 symtab_hdr, isym,
2300 &x86_64_elf_howto_table[r_type]);
2301 }
2302 }
2303
2304 size_reloc = FALSE;
2305 do_size:
2306 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2307 htab->pointer_r_type))
2308 {
2309 struct elf_dyn_relocs *p;
2310 struct elf_dyn_relocs **head;
2311
2312 /* We must copy these reloc types into the output file.
2313 Create a reloc section in dynobj and make room for
2314 this reloc. */
2315 if (sreloc == NULL)
2316 {
2317 sreloc = _bfd_elf_make_dynamic_reloc_section
2318 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2319 abfd, /*rela?*/ TRUE);
2320
2321 if (sreloc == NULL)
2322 goto error_return;
2323 }
2324
2325 /* If this is a global symbol, we count the number of
2326 relocations we need for this symbol. */
2327 if (h != NULL)
2328 head = &eh->dyn_relocs;
2329 else
2330 {
2331 /* Track dynamic relocs needed for local syms too.
2332 We really need local syms available to do this
2333 easily. Oh well. */
2334 asection *s;
2335 void **vpp;
2336
2337 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2338 abfd, r_symndx);
2339 if (isym == NULL)
2340 goto error_return;
2341
2342 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2343 if (s == NULL)
2344 s = sec;
2345
2346 /* Beware of type punned pointers vs strict aliasing
2347 rules. */
2348 vpp = &(elf_section_data (s)->local_dynrel);
2349 head = (struct elf_dyn_relocs **)vpp;
2350 }
2351
2352 p = *head;
2353 if (p == NULL || p->sec != sec)
2354 {
2355 bfd_size_type amt = sizeof *p;
2356
2357 p = ((struct elf_dyn_relocs *)
2358 bfd_alloc (htab->elf.dynobj, amt));
2359 if (p == NULL)
2360 goto error_return;
2361 p->next = *head;
2362 *head = p;
2363 p->sec = sec;
2364 p->count = 0;
2365 p->pc_count = 0;
2366 }
2367
2368 p->count += 1;
2369 /* Count size relocation as PC-relative relocation. */
2370 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2371 p->pc_count += 1;
2372 }
2373 break;
2374
2375 /* This relocation describes the C++ object vtable hierarchy.
2376 Reconstruct it for later use during GC. */
2377 case R_X86_64_GNU_VTINHERIT:
2378 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2379 goto error_return;
2380 break;
2381
2382 /* This relocation describes which C++ vtable entries are actually
2383 used. Record for later use during GC. */
2384 case R_X86_64_GNU_VTENTRY:
2385 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2386 goto error_return;
2387 break;
2388
2389 default:
2390 break;
2391 }
2392 }
2393
2394 if (elf_section_data (sec)->this_hdr.contents != contents)
2395 {
2396 if (!converted && !info->keep_memory)
2397 free (contents);
2398 else
2399 {
2400 /* Cache the section contents for elf_link_input_bfd if any
2401 load is converted or --no-keep-memory isn't used. */
2402 elf_section_data (sec)->this_hdr.contents = contents;
2403 }
2404 }
2405
2406 /* Cache relocations if any load is converted. */
2407 if (elf_section_data (sec)->relocs != relocs && converted)
2408 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2409
2410 return TRUE;
2411
2412 error_return:
2413 if (elf_section_data (sec)->this_hdr.contents != contents)
2414 free (contents);
2415 sec->check_relocs_failed = 1;
2416 return FALSE;
2417 }
2418
2419 /* Return the relocation value for @tpoff relocation
2420 if STT_TLS virtual address is ADDRESS. */
2421
2422 static bfd_vma
2423 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2424 {
2425 struct elf_link_hash_table *htab = elf_hash_table (info);
2426 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2427 bfd_vma static_tls_size;
2428
2429 /* If tls_segment is NULL, we should have signalled an error already. */
2430 if (htab->tls_sec == NULL)
2431 return 0;
2432
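/* x86-64 uses TLS variant 2: the static TLS block ends at the
   thread pointer, so the value returned below is a negative
   offset from %fs:0 in the standard layout. */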
2433 /* Consider special static TLS alignment requirements. */
2434 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2435 return address - static_tls_size - htab->tls_sec->vma;
2436 }
2437
2438 /* Relocate an x86_64 ELF section. */
2439
2440 static bfd_boolean
2441 elf_x86_64_relocate_section (bfd *output_bfd,
2442 struct bfd_link_info *info,
2443 bfd *input_bfd,
2444 asection *input_section,
2445 bfd_byte *contents,
2446 Elf_Internal_Rela *relocs,
2447 Elf_Internal_Sym *local_syms,
2448 asection **local_sections)
2449 {
2450 struct elf_x86_link_hash_table *htab;
2451 Elf_Internal_Shdr *symtab_hdr;
2452 struct elf_link_hash_entry **sym_hashes;
2453 bfd_vma *local_got_offsets;
2454 bfd_vma *local_tlsdesc_gotents;
2455 Elf_Internal_Rela *rel;
2456 Elf_Internal_Rela *wrel;
2457 Elf_Internal_Rela *relend;
2458 unsigned int plt_entry_size;
2459
2460 /* Skip if check_relocs failed. */
2461 if (input_section->check_relocs_failed)
2462 return FALSE;
2463
2464 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2465 if (htab == NULL)
2466 return FALSE;
2467
2468 if (!is_x86_elf (input_bfd, htab))
2469 {
2470 bfd_set_error (bfd_error_wrong_format);
2471 return FALSE;
2472 }
2473
2474 plt_entry_size = htab->plt.plt_entry_size;
2475 symtab_hdr = &elf_symtab_hdr (input_bfd);
2476 sym_hashes = elf_sym_hashes (input_bfd);
2477 local_got_offsets = elf_local_got_offsets (input_bfd);
2478 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2479
2480 _bfd_x86_elf_set_tls_module_base (info);
2481
2482 rel = wrel = relocs;
2483 relend = relocs + input_section->reloc_count;
2484 for (; rel < relend; wrel++, rel++)
2485 {
2486 unsigned int r_type, r_type_tls;
2487 reloc_howto_type *howto;
2488 unsigned long r_symndx;
2489 struct elf_link_hash_entry *h;
2490 struct elf_x86_link_hash_entry *eh;
2491 Elf_Internal_Sym *sym;
2492 asection *sec;
2493 bfd_vma off, offplt, plt_offset;
2494 bfd_vma relocation;
2495 bfd_boolean unresolved_reloc;
2496 bfd_reloc_status_type r;
2497 int tls_type;
2498 asection *base_got, *resolved_plt;
2499 bfd_vma st_size;
2500 bfd_boolean resolved_to_zero;
2501 bfd_boolean relative_reloc;
2502 bfd_boolean converted_reloc;
2503 bfd_boolean need_copy_reloc_in_pie;
2504
2505 r_type = ELF32_R_TYPE (rel->r_info);
2506 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2507 || r_type == (int) R_X86_64_GNU_VTENTRY)
2508 {
2509 if (wrel != rel)
2510 *wrel = *rel;
2511 continue;
2512 }
2513
2514 r_symndx = htab->r_sym (rel->r_info);
2515 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2516 if (converted_reloc)
2517 {
2518 r_type &= ~R_X86_64_converted_reloc_bit;
2519 rel->r_info = htab->r_info (r_symndx, r_type);
2520 }
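/* R_X86_64_converted_reloc_bit was ORed into r_info by
   elf_x86_64_convert_load_reloc during check_relocs to mark
   relocations whose instructions have already been rewritten;
   the bit is stripped above before normal processing. */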
2521
2522 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2523 if (howto == NULL)
2524 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2525
2526 h = NULL;
2527 sym = NULL;
2528 sec = NULL;
2529 unresolved_reloc = FALSE;
2530 if (r_symndx < symtab_hdr->sh_info)
2531 {
2532 sym = local_syms + r_symndx;
2533 sec = local_sections[r_symndx];
2534
2535 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2536 &sec, rel);
2537 st_size = sym->st_size;
2538
2539 /* Relocate against local STT_GNU_IFUNC symbol. */
2540 if (!bfd_link_relocatable (info)
2541 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2542 {
2543 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2544 rel, FALSE);
2545 if (h == NULL)
2546 abort ();
2547
2548 /* Set STT_GNU_IFUNC symbol value. */
2549 h->root.u.def.value = sym->st_value;
2550 h->root.u.def.section = sec;
2551 }
2552 }
2553 else
2554 {
2555 bfd_boolean warned ATTRIBUTE_UNUSED;
2556 bfd_boolean ignored ATTRIBUTE_UNUSED;
2557
2558 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2559 r_symndx, symtab_hdr, sym_hashes,
2560 h, sec, relocation,
2561 unresolved_reloc, warned, ignored);
2562 st_size = h->size;
2563 }
2564
2565 if (sec != NULL && discarded_section (sec))
2566 {
2567 _bfd_clear_contents (howto, input_bfd, input_section,
2568 contents, rel->r_offset);
2569 wrel->r_offset = rel->r_offset;
2570 wrel->r_info = 0;
2571 wrel->r_addend = 0;
2572
2573 /* For ld -r, remove relocations in debug sections against
2574 sections defined in discarded sections. Not done for
2575 eh_frame, whose editing code expects them to be present. */
2576 if (bfd_link_relocatable (info)
2577 && (input_section->flags & SEC_DEBUGGING))
2578 wrel--;
2579
2580 continue;
2581 }
2582
2583 if (bfd_link_relocatable (info))
2584 {
2585 if (wrel != rel)
2586 *wrel = *rel;
2587 continue;
2588 }
2589
2590 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2591 {
2592 if (r_type == R_X86_64_64)
2593 {
2594 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2595 zero-extend it to 64bit if addend is zero. */
2596 r_type = R_X86_64_32;
2597 memset (contents + rel->r_offset + 4, 0, 4);
2598 }
2599 else if (r_type == R_X86_64_SIZE64)
2600 {
2601 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2602 zero-extend it to 64bit if addend is zero. */
2603 r_type = R_X86_64_SIZE32;
2604 memset (contents + rel->r_offset + 4, 0, 4);
2605 }
2606 }
2607
2608 eh = (struct elf_x86_link_hash_entry *) h;
2609
2610 /* Since STT_GNU_IFUNC symbols must go through the PLT, we handle
2611 such a symbol here if it is defined in a non-shared object. */
2612 if (h != NULL
2613 && h->type == STT_GNU_IFUNC
2614 && h->def_regular)
2615 {
2616 bfd_vma plt_index;
2617 const char *name;
2618
2619 if ((input_section->flags & SEC_ALLOC) == 0)
2620 {
2621 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2622 STT_GNU_IFUNC symbol as STT_FUNC. */
2623 if (elf_section_type (input_section) == SHT_NOTE)
2624 goto skip_ifunc;
2625 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2626 sections because such sections are not SEC_ALLOC and
2627 thus ld.so will not process them. */
2628 if ((input_section->flags & SEC_DEBUGGING) != 0)
2629 continue;
2630 abort ();
2631 }
2632
2633 switch (r_type)
2634 {
2635 default:
2636 break;
2637
2638 case R_X86_64_GOTPCREL:
2639 case R_X86_64_GOTPCRELX:
2640 case R_X86_64_REX_GOTPCRELX:
2641 case R_X86_64_GOTPCREL64:
2642 base_got = htab->elf.sgot;
2643 off = h->got.offset;
2644
2645 if (base_got == NULL)
2646 abort ();
2647
2648 if (off == (bfd_vma) -1)
2649 {
2650 /* We can't use h->got.offset here to save state, or
2651 even just remember the offset, as finish_dynamic_symbol
2652 would use that as offset into .got. */
2653
2654 if (h->plt.offset == (bfd_vma) -1)
2655 abort ();
2656
2657 if (htab->elf.splt != NULL)
2658 {
2659 plt_index = (h->plt.offset / plt_entry_size
2660 - htab->plt.has_plt0);
2661 off = (plt_index + 3) * GOT_ENTRY_SIZE;
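/* The first three .got.plt slots are reserved (the address of
   _DYNAMIC plus two slots filled in by the dynamic linker), so
   PLT entry N corresponds to .got.plt slot N + 3. */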
2662 base_got = htab->elf.sgotplt;
2663 }
2664 else
2665 {
2666 plt_index = h->plt.offset / plt_entry_size;
2667 off = plt_index * GOT_ENTRY_SIZE;
2668 base_got = htab->elf.igotplt;
2669 }
2670
2671 if (h->dynindx == -1
2672 || h->forced_local
2673 || info->symbolic)
2674 {
2675 /* This references the local definition. We must
2676 initialize this entry in the global offset table.
2677 Since the offset must always be a multiple of 8,
2678 we use the least significant bit to record
2679 whether we have initialized it already.
2680
2681 When doing a dynamic link, we create a .rela.got
2682 relocation entry to initialize the value. This
2683 is done in the finish_dynamic_symbol routine. */
2684 if ((off & 1) != 0)
2685 off &= ~1;
2686 else
2687 {
2688 bfd_put_64 (output_bfd, relocation,
2689 base_got->contents + off);
2690 /* Note that this is harmless for the GOTPLT64
2691 case, as -1 | 1 still is -1. */
2692 h->got.offset |= 1;
2693 }
2694 }
2695 }
2696
2697 relocation = (base_got->output_section->vma
2698 + base_got->output_offset + off);
2699
2700 goto do_relocation;
2701 }
2702
2703 if (h->plt.offset == (bfd_vma) -1)
2704 {
2705 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2706 if (r_type == htab->pointer_r_type
2707 && (input_section->flags & SEC_CODE) == 0)
2708 goto do_ifunc_pointer;
2709 goto bad_ifunc_reloc;
2710 }
2711
2712 /* STT_GNU_IFUNC symbol must go through PLT. */
2713 if (htab->elf.splt != NULL)
2714 {
2715 if (htab->plt_second != NULL)
2716 {
2717 resolved_plt = htab->plt_second;
2718 plt_offset = eh->plt_second.offset;
2719 }
2720 else
2721 {
2722 resolved_plt = htab->elf.splt;
2723 plt_offset = h->plt.offset;
2724 }
2725 }
2726 else
2727 {
2728 resolved_plt = htab->elf.iplt;
2729 plt_offset = h->plt.offset;
2730 }
2731
2732 relocation = (resolved_plt->output_section->vma
2733 + resolved_plt->output_offset + plt_offset);
2734
2735 switch (r_type)
2736 {
2737 default:
2738 bad_ifunc_reloc:
2739 if (h->root.root.string)
2740 name = h->root.root.string;
2741 else
2742 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2743 NULL);
2744 _bfd_error_handler
2745 /* xgettext:c-format */
2746 (_("%pB: relocation %s against STT_GNU_IFUNC "
2747 "symbol `%s' isn't supported"), input_bfd,
2748 howto->name, name);
2749 bfd_set_error (bfd_error_bad_value);
2750 return FALSE;
2751
2752 case R_X86_64_32S:
2753 if (bfd_link_pic (info))
2754 abort ();
2755 goto do_relocation;
2756
2757 case R_X86_64_32:
2758 if (ABI_64_P (output_bfd))
2759 goto do_relocation;
2760 /* FALLTHROUGH */
2761 case R_X86_64_64:
2762 do_ifunc_pointer:
2763 if (rel->r_addend != 0)
2764 {
2765 if (h->root.root.string)
2766 name = h->root.root.string;
2767 else
2768 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2769 sym, NULL);
2770 _bfd_error_handler
2771 /* xgettext:c-format */
2772 (_("%pB: relocation %s against STT_GNU_IFUNC "
2773 "symbol `%s' has non-zero addend: %" PRId64),
2774 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2775 bfd_set_error (bfd_error_bad_value);
2776 return FALSE;
2777 }
2778
2779 /* Generate a dynamic relocation only when there is a
2780 non-GOT reference in a shared object or there is no
2781 PLT. */
2782 if ((bfd_link_pic (info) && h->non_got_ref)
2783 || h->plt.offset == (bfd_vma) -1)
2784 {
2785 Elf_Internal_Rela outrel;
2786 asection *sreloc;
2787
2788 /* Need a dynamic relocation to get the real function
2789 address. */
2790 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2791 info,
2792 input_section,
2793 rel->r_offset);
2794 if (outrel.r_offset == (bfd_vma) -1
2795 || outrel.r_offset == (bfd_vma) -2)
2796 abort ();
2797
2798 outrel.r_offset += (input_section->output_section->vma
2799 + input_section->output_offset);
2800
2801 if (POINTER_LOCAL_IFUNC_P (info, h))
2802 {
2803 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2804 h->root.root.string,
2805 h->root.u.def.section->owner);
2806
2807 /* This symbol is resolved locally. */
2808 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2809 outrel.r_addend = (h->root.u.def.value
2810 + h->root.u.def.section->output_section->vma
2811 + h->root.u.def.section->output_offset);
2812 }
2813 else
2814 {
2815 outrel.r_info = htab->r_info (h->dynindx, r_type);
2816 outrel.r_addend = 0;
2817 }
2818
2819 /* Dynamic relocations are stored in
2820 1. .rela.ifunc section in PIC object.
2821 2. .rela.got section in dynamic executable.
2822 3. .rela.iplt section in static executable. */
2823 if (bfd_link_pic (info))
2824 sreloc = htab->elf.irelifunc;
2825 else if (htab->elf.splt != NULL)
2826 sreloc = htab->elf.srelgot;
2827 else
2828 sreloc = htab->elf.irelplt;
2829 elf_append_rela (output_bfd, sreloc, &outrel);
2830
2831 /* If this reloc is against an external symbol, we
2832 do not want to fiddle with the addend. Otherwise,
2833 we need to include the symbol value so that it
2834 becomes an addend for the dynamic reloc. For an
2835 internal symbol, the addend has already been updated. */
2836 continue;
2837 }
2838 /* FALLTHROUGH */
2839 case R_X86_64_PC32:
2840 case R_X86_64_PC32_BND:
2841 case R_X86_64_PC64:
2842 case R_X86_64_PLT32:
2843 case R_X86_64_PLT32_BND:
2844 goto do_relocation;
2845 }
2846 }
2847
2848 skip_ifunc:
2849 resolved_to_zero = (eh != NULL
2850 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2851
2852 /* When generating a shared object, the relocations handled here are
2853 copied into the output file to be resolved at run time. */
2854 switch (r_type)
2855 {
2856 case R_X86_64_GOT32:
2857 case R_X86_64_GOT64:
2858 /* Relocation is to the entry for this symbol in the global
2859 offset table. */
2860 case R_X86_64_GOTPCREL:
2861 case R_X86_64_GOTPCRELX:
2862 case R_X86_64_REX_GOTPCRELX:
2863 case R_X86_64_GOTPCREL64:
2864 /* Use global offset table entry as symbol value. */
2865 case R_X86_64_GOTPLT64:
2866 /* This is obsolete and treated the same as GOT64. */
2867 base_got = htab->elf.sgot;
2868
2869 if (htab->elf.sgot == NULL)
2870 abort ();
2871
2872 relative_reloc = FALSE;
2873 if (h != NULL)
2874 {
2875 off = h->got.offset;
2876 if (h->needs_plt
2877 && h->plt.offset != (bfd_vma)-1
2878 && off == (bfd_vma)-1)
2879 {
2880 /* We can't use h->got.offset here to save
2881 state, or even just remember the offset, as
2882 finish_dynamic_symbol would use that as offset into
2883 .got. */
2884 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2885 - htab->plt.has_plt0);
2886 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2887 base_got = htab->elf.sgotplt;
2888 }
2889
2890 if (RESOLVED_LOCALLY_P (info, h, htab))
2891 {
2892 /* We must initialize this entry in the global offset
2893 table. Since the offset must always be a multiple
2894 of 8, we use the least significant bit to record
2895 whether we have initialized it already.
2896
2897 When doing a dynamic link, we create a .rela.got
2898 relocation entry to initialize the value. This is
2899 done in the finish_dynamic_symbol routine. */
2900 if ((off & 1) != 0)
2901 off &= ~1;
2902 else
2903 {
2904 bfd_put_64 (output_bfd, relocation,
2905 base_got->contents + off);
2906 /* Note that this is harmless for the GOTPLT64 case,
2907 as -1 | 1 still is -1. */
2908 h->got.offset |= 1;
2909
2910 if (GENERATE_RELATIVE_RELOC_P (info, h))
2911 {
2912 /* If this symbol isn't dynamic in PIC,
2913 generate R_X86_64_RELATIVE here. */
2914 eh->no_finish_dynamic_symbol = 1;
2915 relative_reloc = TRUE;
2916 }
2917 }
2918 }
2919 else
2920 unresolved_reloc = FALSE;
2921 }
2922 else
2923 {
2924 if (local_got_offsets == NULL)
2925 abort ();
2926
2927 off = local_got_offsets[r_symndx];
2928
2929 /* The offset must always be a multiple of 8. We use
2930 the least significant bit to record whether we have
2931 already generated the necessary reloc. */
2932 if ((off & 1) != 0)
2933 off &= ~1;
2934 else
2935 {
2936 bfd_put_64 (output_bfd, relocation,
2937 base_got->contents + off);
2938 local_got_offsets[r_symndx] |= 1;
2939
2940 if (bfd_link_pic (info))
2941 relative_reloc = TRUE;
2942 }
2943 }
2944
2945 if (relative_reloc)
2946 {
2947 asection *s;
2948 Elf_Internal_Rela outrel;
2949
2950 /* We need to generate a R_X86_64_RELATIVE reloc
2951 for the dynamic linker. */
2952 s = htab->elf.srelgot;
2953 if (s == NULL)
2954 abort ();
2955
2956 outrel.r_offset = (base_got->output_section->vma
2957 + base_got->output_offset
2958 + off);
2959 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2960 outrel.r_addend = relocation;
2961 elf_append_rela (output_bfd, s, &outrel);
2962 }
2963
2964 if (off >= (bfd_vma) -2)
2965 abort ();
2966
2967 relocation = base_got->output_section->vma
2968 + base_got->output_offset + off;
2969 if (r_type != R_X86_64_GOTPCREL
2970 && r_type != R_X86_64_GOTPCRELX
2971 && r_type != R_X86_64_REX_GOTPCRELX
2972 && r_type != R_X86_64_GOTPCREL64)
2973 relocation -= htab->elf.sgotplt->output_section->vma
2974 - htab->elf.sgotplt->output_offset;
2975
2976 break;
2977
2978 case R_X86_64_GOTOFF64:
2979 /* Relocation is relative to the start of the global offset
2980 table. */
2981
2982 /* Check to make sure it isn't a protected function or data
2983 symbol for a shared library since it may not be local when
2984 used as a function address or with a copy relocation. We also
2985 need to make sure that the symbol is referenced locally. */
2986 if (bfd_link_pic (info) && h)
2987 {
2988 if (!h->def_regular)
2989 {
2990 const char *v;
2991
2992 switch (ELF_ST_VISIBILITY (h->other))
2993 {
2994 case STV_HIDDEN:
2995 v = _("hidden symbol");
2996 break;
2997 case STV_INTERNAL:
2998 v = _("internal symbol");
2999 break;
3000 case STV_PROTECTED:
3001 v = _("protected symbol");
3002 break;
3003 default:
3004 v = _("symbol");
3005 break;
3006 }
3007
3008 _bfd_error_handler
3009 /* xgettext:c-format */
3010 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3011 " `%s' can not be used when making a shared object"),
3012 input_bfd, v, h->root.root.string);
3013 bfd_set_error (bfd_error_bad_value);
3014 return FALSE;
3015 }
3016 else if (!bfd_link_executable (info)
3017 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3018 && (h->type == STT_FUNC
3019 || h->type == STT_OBJECT)
3020 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3021 {
3022 _bfd_error_handler
3023 /* xgettext:c-format */
3024 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3025 " `%s' can not be used when making a shared object"),
3026 input_bfd,
3027 h->type == STT_FUNC ? "function" : "data",
3028 h->root.root.string);
3029 bfd_set_error (bfd_error_bad_value);
3030 return FALSE;
3031 }
3032 }
3033
3034 /* Note that sgot is not involved in this
3035 calculation. We always want the start of .got.plt. If we
3036 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3037 permitted by the ABI, we might have to change this
3038 calculation. */
3039 relocation -= htab->elf.sgotplt->output_section->vma
3040 + htab->elf.sgotplt->output_offset;
3041 break;
3042
3043 case R_X86_64_GOTPC32:
3044 case R_X86_64_GOTPC64:
3045 /* Use global offset table as symbol value. */
3046 relocation = htab->elf.sgotplt->output_section->vma
3047 + htab->elf.sgotplt->output_offset;
3048 unresolved_reloc = FALSE;
3049 break;
3050
3051 case R_X86_64_PLTOFF64:
3052 /* Relocation is PLT entry relative to GOT. For local
3053 symbols it's the symbol itself relative to GOT. */
3054 if (h != NULL
3055 /* See PLT32 handling. */
3056 && (h->plt.offset != (bfd_vma) -1
3057 || eh->plt_got.offset != (bfd_vma) -1)
3058 && htab->elf.splt != NULL)
3059 {
3060 if (eh->plt_got.offset != (bfd_vma) -1)
3061 {
3062 /* Use the GOT PLT. */
3063 resolved_plt = htab->plt_got;
3064 plt_offset = eh->plt_got.offset;
3065 }
3066 else if (htab->plt_second != NULL)
3067 {
3068 resolved_plt = htab->plt_second;
3069 plt_offset = eh->plt_second.offset;
3070 }
3071 else
3072 {
3073 resolved_plt = htab->elf.splt;
3074 plt_offset = h->plt.offset;
3075 }
3076
3077 relocation = (resolved_plt->output_section->vma
3078 + resolved_plt->output_offset
3079 + plt_offset);
3080 unresolved_reloc = FALSE;
3081 }
3082
3083 relocation -= htab->elf.sgotplt->output_section->vma
3084 + htab->elf.sgotplt->output_offset;
3085 break;
3086
3087 case R_X86_64_PLT32:
3088 case R_X86_64_PLT32_BND:
3089 /* Relocation is to the entry for this symbol in the
3090 procedure linkage table. */
3091
3092 /* Resolve a PLT32 reloc against a local symbol directly,
3093 without using the procedure linkage table. */
3094 if (h == NULL)
3095 break;
3096
3097 if ((h->plt.offset == (bfd_vma) -1
3098 && eh->plt_got.offset == (bfd_vma) -1)
3099 || htab->elf.splt == NULL)
3100 {
3101 /* We didn't make a PLT entry for this symbol. This
3102 happens when statically linking PIC code, or when
3103 using -Bsymbolic. */
3104 break;
3105 }
3106
3107 use_plt:
3108 if (h->plt.offset != (bfd_vma) -1)
3109 {
3110 if (htab->plt_second != NULL)
3111 {
3112 resolved_plt = htab->plt_second;
3113 plt_offset = eh->plt_second.offset;
3114 }
3115 else
3116 {
3117 resolved_plt = htab->elf.splt;
3118 plt_offset = h->plt.offset;
3119 }
3120 }
3121 else
3122 {
3123 /* Use the GOT PLT. */
3124 resolved_plt = htab->plt_got;
3125 plt_offset = eh->plt_got.offset;
3126 }
3127
3128 relocation = (resolved_plt->output_section->vma
3129 + resolved_plt->output_offset
3130 + plt_offset);
3131 unresolved_reloc = FALSE;
3132 break;
3133
3134 case R_X86_64_SIZE32:
3135 case R_X86_64_SIZE64:
3136 /* Set to symbol size. */
3137 relocation = st_size;
3138 goto direct;
3139
3140 case R_X86_64_PC8:
3141 case R_X86_64_PC16:
3142 case R_X86_64_PC32:
3143 case R_X86_64_PC32_BND:
3144 /* Since x86-64 has a PC-relative PLT, we can use the PLT in a
3145 PIE as the function address. */
3146 if (h != NULL
3147 && (input_section->flags & SEC_CODE) == 0
3148 && bfd_link_pie (info)
3149 && h->type == STT_FUNC
3150 && !h->def_regular
3151 && h->def_dynamic)
3152 goto use_plt;
3153 /* Fall through. */
3154
3155 case R_X86_64_8:
3156 case R_X86_64_16:
3157 case R_X86_64_32:
3158 case R_X86_64_PC64:
3159 case R_X86_64_64:
3160 /* FIXME: The ABI says the linker should make sure the value is
3161 the same when it's zero-extended to 64 bits. */
3162
3163 direct:
3164 if ((input_section->flags & SEC_ALLOC) == 0)
3165 break;
3166
3167 need_copy_reloc_in_pie = (bfd_link_pie (info)
3168 && h != NULL
3169 && (h->needs_copy
3170 || eh->needs_copy
3171 || (h->root.type
3172 == bfd_link_hash_undefined))
3173 && (X86_PCREL_TYPE_P (r_type)
3174 || X86_SIZE_TYPE_P (r_type)));
3175
3176 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3177 need_copy_reloc_in_pie,
3178 resolved_to_zero, FALSE))
3179 {
3180 Elf_Internal_Rela outrel;
3181 bfd_boolean skip, relocate;
3182 asection *sreloc;
3183
3184 /* When generating a shared object, these relocations
3185 are copied into the output file to be resolved at run
3186 time. */
3187 skip = FALSE;
3188 relocate = FALSE;
3189
3190 outrel.r_offset =
3191 _bfd_elf_section_offset (output_bfd, info, input_section,
3192 rel->r_offset);
3193 if (outrel.r_offset == (bfd_vma) -1)
3194 skip = TRUE;
3195 else if (outrel.r_offset == (bfd_vma) -2)
3196 skip = TRUE, relocate = TRUE;
3197
3198 outrel.r_offset += (input_section->output_section->vma
3199 + input_section->output_offset);
3200
3201 if (skip)
3202 memset (&outrel, 0, sizeof outrel);
3203
3204 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3205 {
3206 outrel.r_info = htab->r_info (h->dynindx, r_type);
3207 outrel.r_addend = rel->r_addend;
3208 }
3209 else
3210 {
3211 /* This symbol is local, or marked to become local.
3212 When relocation overflow check is disabled, we
3213 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3214 if (r_type == htab->pointer_r_type
3215 || (r_type == R_X86_64_32
3216 && htab->params->no_reloc_overflow_check))
3217 {
3218 relocate = TRUE;
3219 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3220 outrel.r_addend = relocation + rel->r_addend;
3221 }
3222 else if (r_type == R_X86_64_64
3223 && !ABI_64_P (output_bfd))
3224 {
3225 relocate = TRUE;
3226 outrel.r_info = htab->r_info (0,
3227 R_X86_64_RELATIVE64);
3228 outrel.r_addend = relocation + rel->r_addend;
3229 /* Check addend overflow. */
3230 if ((outrel.r_addend & 0x80000000)
3231 != (rel->r_addend & 0x80000000))
3232 {
3233 const char *name;
3234 int addend = rel->r_addend;
3235 if (h && h->root.root.string)
3236 name = h->root.root.string;
3237 else
3238 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3239 sym, NULL);
3240 _bfd_error_handler
3241 /* xgettext:c-format */
3242 (_("%pB: addend %s%#x in relocation %s against "
3243 "symbol `%s' at %#" PRIx64
3244 " in section `%pA' is out of range"),
3245 input_bfd, addend < 0 ? "-" : "", addend,
3246 howto->name, name, (uint64_t) rel->r_offset,
3247 input_section);
3248 bfd_set_error (bfd_error_bad_value);
3249 return FALSE;
3250 }
3251 }
3252 else
3253 {
3254 long sindx;
3255
3256 if (bfd_is_abs_section (sec))
3257 sindx = 0;
3258 else if (sec == NULL || sec->owner == NULL)
3259 {
3260 bfd_set_error (bfd_error_bad_value);
3261 return FALSE;
3262 }
3263 else
3264 {
3265 asection *osec;
3266
3267 /* We are turning this relocation into one
3268 against a section symbol. It would be
3269 proper to subtract the symbol's value,
3270 osec->vma, from the emitted reloc addend,
3271 but ld.so expects buggy relocs. */
3272 osec = sec->output_section;
3273 sindx = elf_section_data (osec)->dynindx;
3274 if (sindx == 0)
3275 {
3276 asection *oi = htab->elf.text_index_section;
3277 sindx = elf_section_data (oi)->dynindx;
3278 }
3279 BFD_ASSERT (sindx != 0);
3280 }
3281
3282 outrel.r_info = htab->r_info (sindx, r_type);
3283 outrel.r_addend = relocation + rel->r_addend;
3284 }
3285 }
3286
3287 sreloc = elf_section_data (input_section)->sreloc;
3288
3289 if (sreloc == NULL || sreloc->contents == NULL)
3290 {
3291 r = bfd_reloc_notsupported;
3292 goto check_relocation_error;
3293 }
3294
3295 elf_append_rela (output_bfd, sreloc, &outrel);
3296
3297 /* If this reloc is against an external symbol, we do
3298 not want to fiddle with the addend. Otherwise, we
3299 need to include the symbol value so that it becomes
3300 an addend for the dynamic reloc. */
3301 if (! relocate)
3302 continue;
3303 }
3304
3305 break;
3306
3307 case R_X86_64_TLSGD:
3308 case R_X86_64_GOTPC32_TLSDESC:
3309 case R_X86_64_TLSDESC_CALL:
3310 case R_X86_64_GOTTPOFF:
3311 tls_type = GOT_UNKNOWN;
3312 if (h == NULL && local_got_offsets)
3313 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3314 else if (h != NULL)
3315 tls_type = elf_x86_hash_entry (h)->tls_type;
3316
3317 r_type_tls = r_type;
3318 if (! elf_x86_64_tls_transition (info, input_bfd,
3319 input_section, contents,
3320 symtab_hdr, sym_hashes,
3321 &r_type_tls, tls_type, rel,
3322 relend, h, r_symndx, TRUE))
3323 return FALSE;
3324
3325 if (r_type_tls == R_X86_64_TPOFF32)
3326 {
3327 bfd_vma roff = rel->r_offset;
3328
3329 BFD_ASSERT (! unresolved_reloc);
3330
3331 if (r_type == R_X86_64_TLSGD)
3332 {
3333 /* GD->LE transition. For 64bit, change
3334 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3335 .word 0x6666; rex64; call __tls_get_addr@PLT
3336 or
3337 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3338 .byte 0x66; rex64
3339 call *__tls_get_addr@GOTPCREL(%rip)
3340 which may be converted to
3341 addr32 call __tls_get_addr
3342 into:
3343 movq %fs:0, %rax
3344 leaq foo@tpoff(%rax), %rax
3345 For 32bit, change
3346 leaq foo@tlsgd(%rip), %rdi
3347 .word 0x6666; rex64; call __tls_get_addr@PLT
3348 or
3349 leaq foo@tlsgd(%rip), %rdi
3350 .byte 0x66; rex64
3351 call *__tls_get_addr@GOTPCREL(%rip)
3352 which may be converted to
3353 addr32 call __tls_get_addr
3354 into:
3355 movl %fs:0, %eax
3356 leaq foo@tpoff(%rax), %rax
3357 For largepic, change:
3358 leaq foo@tlsgd(%rip), %rdi
3359 movabsq $__tls_get_addr@pltoff, %rax
3360 addq %r15, %rax
3361 call *%rax
3362 into:
3363 movq %fs:0, %rax
3364 leaq foo@tpoff(%rax), %rax
3365 nopw 0x0(%rax,%rax,1) */
3366 int largepic = 0;
3367 if (ABI_64_P (output_bfd))
3368 {
3369 if (contents[roff + 5] == 0xb8)
3370 {
3371 if (roff < 3
3372 || (roff - 3 + 22) > input_section->size)
3373 {
3374 corrupt_input:
3375 info->callbacks->einfo
3376 (_("%F%P: corrupt input: %pB\n"),
3377 input_bfd);
3378 return FALSE;
3379 }
3380 memcpy (contents + roff - 3,
3381 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3382 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3383 largepic = 1;
3384 }
3385 else
3386 {
3387 if (roff < 4
3388 || (roff - 4 + 16) > input_section->size)
3389 goto corrupt_input;
3390 memcpy (contents + roff - 4,
3391 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3392 16);
3393 }
3394 }
3395 else
3396 {
3397 if (roff < 3
3398 || (roff - 3 + 15) > input_section->size)
3399 goto corrupt_input;
3400 memcpy (contents + roff - 3,
3401 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3402 15);
3403 }
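/* In the byte strings above, 0x64 is the %fs segment prefix,
   0x48 0x8b 0x04 0x25 <imm32> encodes "movq %fs:0, %rax" (without
   the 0x48 REX.W prefix in the 32-bit case, giving
   "movl %fs:0, %eax"), and 0x48 0x8d 0x80 <disp32> encodes
   "leaq disp32(%rax), %rax"; the @tpoff displacement is filled in
   by the bfd_put_32 call below, and the trailing 0x66 0x0f 0x1f
   bytes in the largepic case are the nopw padding. */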
3404 bfd_put_32 (output_bfd,
3405 elf_x86_64_tpoff (info, relocation),
3406 contents + roff + 8 + largepic);
3407 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3408 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3409 rel++;
3410 wrel++;
3411 continue;
3412 }
3413 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3414 {
3415 /* GDesc -> LE transition.
3416 It's originally something like:
3417 leaq x@tlsdesc(%rip), %rax
3418
3419 Change it to:
3420 movl $x@tpoff, %rax. */
3421
3422 unsigned int val, type;
3423
3424 if (roff < 3)
3425 goto corrupt_input;
3426 type = bfd_get_8 (input_bfd, contents + roff - 3);
3427 val = bfd_get_8 (input_bfd, contents + roff - 1);
3428 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3429 contents + roff - 3);
3430 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3431 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3432 contents + roff - 1);
3433 bfd_put_32 (output_bfd,
3434 elf_x86_64_tpoff (info, relocation),
3435 contents + roff);
3436 continue;
3437 }
3438 else if (r_type == R_X86_64_TLSDESC_CALL)
3439 {
3440 /* GDesc -> LE transition.
3441 It's originally:
3442 call *(%rax)
3443 Turn it into:
3444 xchg %ax,%ax. */
3445 bfd_put_8 (output_bfd, 0x66, contents + roff);
3446 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3447 continue;
3448 }
3449 else if (r_type == R_X86_64_GOTTPOFF)
3450 {
3451 /* IE->LE transition:
3452 For 64bit, originally it can be one of:
3453 movq foo@gottpoff(%rip), %reg
3454 addq foo@gottpoff(%rip), %reg
3455 We change it into:
3456 movq $foo, %reg
3457 leaq foo(%reg), %reg
3458 addq $foo, %reg.
3459 For 32bit, originally it can be one of:
3460 movq foo@gottpoff(%rip), %reg
3461 addl foo@gottpoff(%rip), %reg
3462 We change it into:
3463 movq $foo, %reg
3464 leal foo(%reg), %reg
3465 addl $foo, %reg. */
3466
3467 unsigned int val, type, reg;
3468
3469 if (roff >= 3)
3470 val = bfd_get_8 (input_bfd, contents + roff - 3);
3471 else
3472 {
3473 if (roff < 2)
3474 goto corrupt_input;
3475 val = 0;
3476 }
3477 type = bfd_get_8 (input_bfd, contents + roff - 2);
3478 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3479 reg >>= 3;
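/* At this point VAL holds the REX prefix byte (if any), TYPE the
   opcode byte (0x8b for mov, otherwise add), and REG the
   destination register from the ModRM reg field. */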
3480 if (type == 0x8b)
3481 {
3482 /* movq */
3483 if (val == 0x4c)
3484 {
3485 if (roff < 3)
3486 goto corrupt_input;
3487 bfd_put_8 (output_bfd, 0x49,
3488 contents + roff - 3);
3489 }
3490 else if (!ABI_64_P (output_bfd) && val == 0x44)
3491 {
3492 if (roff < 3)
3493 goto corrupt_input;
3494 bfd_put_8 (output_bfd, 0x41,
3495 contents + roff - 3);
3496 }
3497 bfd_put_8 (output_bfd, 0xc7,
3498 contents + roff - 2);
3499 bfd_put_8 (output_bfd, 0xc0 | reg,
3500 contents + roff - 1);
3501 }
3502 else if (reg == 4)
3503 {
3504 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3505 is special */
3506 if (val == 0x4c)
3507 {
3508 if (roff < 3)
3509 goto corrupt_input;
3510 bfd_put_8 (output_bfd, 0x49,
3511 contents + roff - 3);
3512 }
3513 else if (!ABI_64_P (output_bfd) && val == 0x44)
3514 {
3515 if (roff < 3)
3516 goto corrupt_input;
3517 bfd_put_8 (output_bfd, 0x41,
3518 contents + roff - 3);
3519 }
3520 bfd_put_8 (output_bfd, 0x81,
3521 contents + roff - 2);
3522 bfd_put_8 (output_bfd, 0xc0 | reg,
3523 contents + roff - 1);
3524 }
3525 else
3526 {
3527 /* addq/addl -> leaq/leal */
3528 if (val == 0x4c)
3529 {
3530 if (roff < 3)
3531 goto corrupt_input;
3532 bfd_put_8 (output_bfd, 0x4d,
3533 contents + roff - 3);
3534 }
3535 else if (!ABI_64_P (output_bfd) && val == 0x44)
3536 {
3537 if (roff < 3)
3538 goto corrupt_input;
3539 bfd_put_8 (output_bfd, 0x45,
3540 contents + roff - 3);
3541 }
3542 bfd_put_8 (output_bfd, 0x8d,
3543 contents + roff - 2);
3544 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3545 contents + roff - 1);
3546 }
3547 bfd_put_32 (output_bfd,
3548 elf_x86_64_tpoff (info, relocation),
3549 contents + roff);
3550 continue;
3551 }
3552 else
3553 BFD_ASSERT (FALSE);
3554 }
3555
3556 if (htab->elf.sgot == NULL)
3557 abort ();
3558
3559 if (h != NULL)
3560 {
3561 off = h->got.offset;
3562 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3563 }
3564 else
3565 {
3566 if (local_got_offsets == NULL)
3567 abort ();
3568
3569 off = local_got_offsets[r_symndx];
3570 offplt = local_tlsdesc_gotents[r_symndx];
3571 }
3572
3573 if ((off & 1) != 0)
3574 off &= ~1;
3575 else
3576 {
3577 Elf_Internal_Rela outrel;
3578 int dr_type, indx;
3579 asection *sreloc;
3580
3581 if (htab->elf.srelgot == NULL)
3582 abort ();
3583
3584 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3585
3586 if (GOT_TLS_GDESC_P (tls_type))
3587 {
3588 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3589 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3590 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3591 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3592 + htab->elf.sgotplt->output_offset
3593 + offplt
3594 + htab->sgotplt_jump_table_size);
3595 sreloc = htab->elf.srelplt;
3596 if (indx == 0)
3597 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3598 else
3599 outrel.r_addend = 0;
3600 elf_append_rela (output_bfd, sreloc, &outrel);
3601 }
3602
3603 sreloc = htab->elf.srelgot;
3604
3605 outrel.r_offset = (htab->elf.sgot->output_section->vma
3606 + htab->elf.sgot->output_offset + off);
3607
3608 if (GOT_TLS_GD_P (tls_type))
3609 dr_type = R_X86_64_DTPMOD64;
3610 else if (GOT_TLS_GDESC_P (tls_type))
3611 goto dr_done;
3612 else
3613 dr_type = R_X86_64_TPOFF64;
3614
3615 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3616 outrel.r_addend = 0;
3617 if ((dr_type == R_X86_64_TPOFF64
3618 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3619 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3620 outrel.r_info = htab->r_info (indx, dr_type);
3621
3622 elf_append_rela (output_bfd, sreloc, &outrel);
3623
3624 if (GOT_TLS_GD_P (tls_type))
3625 {
3626 if (indx == 0)
3627 {
3628 BFD_ASSERT (! unresolved_reloc);
3629 bfd_put_64 (output_bfd,
3630 relocation - _bfd_x86_elf_dtpoff_base (info),
3631 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3632 }
3633 else
3634 {
3635 bfd_put_64 (output_bfd, 0,
3636 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3637 outrel.r_info = htab->r_info (indx,
3638 R_X86_64_DTPOFF64);
3639 outrel.r_offset += GOT_ENTRY_SIZE;
3640 elf_append_rela (output_bfd, sreloc,
3641 &outrel);
3642 }
3643 }
3644
3645 dr_done:
3646 if (h != NULL)
3647 h->got.offset |= 1;
3648 else
3649 local_got_offsets[r_symndx] |= 1;
3650 }
3651
3652 if (off >= (bfd_vma) -2
3653 && ! GOT_TLS_GDESC_P (tls_type))
3654 abort ();
3655 if (r_type_tls == r_type)
3656 {
3657 if (r_type == R_X86_64_GOTPC32_TLSDESC
3658 || r_type == R_X86_64_TLSDESC_CALL)
3659 relocation = htab->elf.sgotplt->output_section->vma
3660 + htab->elf.sgotplt->output_offset
3661 + offplt + htab->sgotplt_jump_table_size;
3662 else
3663 relocation = htab->elf.sgot->output_section->vma
3664 + htab->elf.sgot->output_offset + off;
3665 unresolved_reloc = FALSE;
3666 }
3667 else
3668 {
3669 bfd_vma roff = rel->r_offset;
3670
3671 if (r_type == R_X86_64_TLSGD)
3672 {
3673 /* GD->IE transition. For 64bit, change
3674 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3675 .word 0x6666; rex64; call __tls_get_addr@PLT
3676 or
3677 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3678 .byte 0x66; rex64
3679 call *__tls_get_addr@GOTPCREL(%rip)
3680 which may be converted to
3681 addr32 call __tls_get_addr
3682 into:
3683 movq %fs:0, %rax
3684 addq foo@gottpoff(%rip), %rax
3685 For 32bit, change
3686 leaq foo@tlsgd(%rip), %rdi
3687 .word 0x6666; rex64; call __tls_get_addr@PLT
3688 or
3689 leaq foo@tlsgd(%rip), %rdi
3690 .byte 0x66; rex64;
3691 call *__tls_get_addr@GOTPCREL(%rip)
3692 which may be converted to
3693 addr32 call __tls_get_addr
3694 into:
3695 movl %fs:0, %eax
3696 addq foo@gottpoff(%rip), %rax
3697 For largepic, change:
3698 leaq foo@tlsgd(%rip), %rdi
3699 movabsq $__tls_get_addr@pltoff, %rax
3700 addq %r15, %rax
3701 call *%rax
3702 into:
3703 movq %fs:0, %rax
3704 addq foo@gottpoff(%rip), %rax
3705 nopw 0x0(%rax,%rax,1) */
3706 int largepic = 0;
3707 if (ABI_64_P (output_bfd))
3708 {
3709 if (contents[roff + 5] == 0xb8)
3710 {
3711 if (roff < 3
3712 || (roff - 3 + 22) > input_section->size)
3713 goto corrupt_input;
3714 memcpy (contents + roff - 3,
3715 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3716 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3717 largepic = 1;
3718 }
3719 else
3720 {
3721 if (roff < 4
3722 || (roff - 4 + 16) > input_section->size)
3723 goto corrupt_input;
3724 memcpy (contents + roff - 4,
3725 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3726 16);
3727 }
3728 }
3729 else
3730 {
3731 if (roff < 3
3732 || (roff - 3 + 15) > input_section->size)
3733 goto corrupt_input;
3734 memcpy (contents + roff - 3,
3735 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3736 15);
3737 }
3738
3739 relocation = (htab->elf.sgot->output_section->vma
3740 + htab->elf.sgot->output_offset + off
3741 - roff
3742 - largepic
3743 - input_section->output_section->vma
3744 - input_section->output_offset
3745 - 12);
3746 bfd_put_32 (output_bfd, relocation,
3747 contents + roff + 8 + largepic);
3748 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3749 rel++;
3750 wrel++;
3751 continue;
3752 }
3753 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3754 {
3755 /* GDesc -> IE transition.
3756 It's originally something like:
3757 leaq x@tlsdesc(%rip), %rax
3758
3759 Change it to:
3760 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3761
3762 /* Now modify the instruction as appropriate. To
3763 turn a leaq into a movq in the form we use it, it
3764 suffices to change the second byte from 0x8d to
3765 0x8b. */
3766 if (roff < 2)
3767 goto corrupt_input;
3768 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3769
3770 bfd_put_32 (output_bfd,
3771 htab->elf.sgot->output_section->vma
3772 + htab->elf.sgot->output_offset + off
3773 - rel->r_offset
3774 - input_section->output_section->vma
3775 - input_section->output_offset
3776 - 4,
3777 contents + roff);
3778 continue;
3779 }
3780 else if (r_type == R_X86_64_TLSDESC_CALL)
3781 {
3782 /* GDesc -> IE transition.
3783 It's originally:
3784 call *(%rax)
3785
3786 Change it to:
3787 xchg %ax, %ax. */
3788
3789 bfd_put_8 (output_bfd, 0x66, contents + roff);
3790 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3791 continue;
3792 }
3793 else
3794 BFD_ASSERT (FALSE);
3795 }
3796 break;
3797
3798 case R_X86_64_TLSLD:
3799 if (! elf_x86_64_tls_transition (info, input_bfd,
3800 input_section, contents,
3801 symtab_hdr, sym_hashes,
3802 &r_type, GOT_UNKNOWN, rel,
3803 relend, h, r_symndx, TRUE))
3804 return FALSE;
3805
3806 if (r_type != R_X86_64_TLSLD)
3807 {
3808 /* LD->LE transition:
3809 leaq foo@tlsld(%rip), %rdi
3810 call __tls_get_addr@PLT
3811 For 64bit, we change it into:
3812 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3813 For 32bit, we change it into:
3814 nopl 0x0(%rax); movl %fs:0, %eax
3815 Or
3816 leaq foo@tlsld(%rip), %rdi;
3817 call *__tls_get_addr@GOTPCREL(%rip)
3818 which may be converted to
3819 addr32 call __tls_get_addr
3820 For 64bit, we change it into:
3821 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3822 For 32bit, we change it into:
3823 nopw 0x0(%rax); movl %fs:0, %eax
3824 For largepic, change:
3825 leaq foo@tlsld(%rip), %rdi
3826 movabsq $__tls_get_addr@pltoff, %rax
3827 addq %rbx, %rax
3828 call *%rax
3829 into
3830 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3831 movq %fs:0, %rax */
3832
3833 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3834 if (ABI_64_P (output_bfd))
3835 {
3836 if ((rel->r_offset + 5) >= input_section->size)
3837 goto corrupt_input;
3838 if (contents[rel->r_offset + 5] == 0xb8)
3839 {
3840 if (rel->r_offset < 3
3841 || (rel->r_offset - 3 + 22) > input_section->size)
3842 goto corrupt_input;
3843 memcpy (contents + rel->r_offset - 3,
3844 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3845 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3846 }
3847 else if (contents[rel->r_offset + 4] == 0xff
3848 || contents[rel->r_offset + 4] == 0x67)
3849 {
3850 if (rel->r_offset < 3
3851 || (rel->r_offset - 3 + 13) > input_section->size)
3852 goto corrupt_input;
3853 memcpy (contents + rel->r_offset - 3,
3854 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3855 13);
3856
3857 }
3858 else
3859 {
3860 if (rel->r_offset < 3
3861 || (rel->r_offset - 3 + 12) > input_section->size)
3862 goto corrupt_input;
3863 memcpy (contents + rel->r_offset - 3,
3864 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3865 }
3866 }
3867 else
3868 {
3869 if ((rel->r_offset + 4) >= input_section->size)
3870 goto corrupt_input;
3871 if (contents[rel->r_offset + 4] == 0xff)
3872 {
3873 if (rel->r_offset < 3
3874 || (rel->r_offset - 3 + 13) > input_section->size)
3875 goto corrupt_input;
3876 memcpy (contents + rel->r_offset - 3,
3877 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3878 13);
3879 }
3880 else
3881 {
3882 if (rel->r_offset < 3
3883 || (rel->r_offset - 3 + 12) > input_section->size)
3884 goto corrupt_input;
3885 memcpy (contents + rel->r_offset - 3,
3886 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3887 }
3888 }
3889 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3890 and R_X86_64_PLTOFF64. */
3891 rel++;
3892 wrel++;
3893 continue;
3894 }
3895
3896 if (htab->elf.sgot == NULL)
3897 abort ();
3898
3899 off = htab->tls_ld_or_ldm_got.offset;
3900 if (off & 1)
3901 off &= ~1;
3902 else
3903 {
3904 Elf_Internal_Rela outrel;
3905
3906 if (htab->elf.srelgot == NULL)
3907 abort ();
3908
3909 outrel.r_offset = (htab->elf.sgot->output_section->vma
3910 + htab->elf.sgot->output_offset + off);
3911
3912 bfd_put_64 (output_bfd, 0,
3913 htab->elf.sgot->contents + off);
3914 bfd_put_64 (output_bfd, 0,
3915 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3916 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3917 outrel.r_addend = 0;
3918 elf_append_rela (output_bfd, htab->elf.srelgot,
3919 &outrel);
3920 htab->tls_ld_or_ldm_got.offset |= 1;
3921 }
3922 relocation = htab->elf.sgot->output_section->vma
3923 + htab->elf.sgot->output_offset + off;
3924 unresolved_reloc = FALSE;
3925 break;
3926
3927 case R_X86_64_DTPOFF32:
3928 if (!bfd_link_executable (info)
3929 || (input_section->flags & SEC_CODE) == 0)
3930 relocation -= _bfd_x86_elf_dtpoff_base (info);
3931 else
3932 relocation = elf_x86_64_tpoff (info, relocation);
3933 break;
3934
3935 case R_X86_64_TPOFF32:
3936 case R_X86_64_TPOFF64:
3937 BFD_ASSERT (bfd_link_executable (info));
3938 relocation = elf_x86_64_tpoff (info, relocation);
3939 break;
3940
3941 case R_X86_64_DTPOFF64:
3942 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3943 relocation -= _bfd_x86_elf_dtpoff_base (info);
3944 break;
3945
3946 default:
3947 break;
3948 }
3949
3950 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3951 because such sections are not SEC_ALLOC and thus ld.so will
3952 not process them. */
3953 if (unresolved_reloc
3954 && !((input_section->flags & SEC_DEBUGGING) != 0
3955 && h->def_dynamic)
3956 && _bfd_elf_section_offset (output_bfd, info, input_section,
3957 rel->r_offset) != (bfd_vma) -1)
3958 {
3959 switch (r_type)
3960 {
3961 case R_X86_64_32S:
3962 sec = h->root.u.def.section;
3963 if ((info->nocopyreloc
3964 || (eh->def_protected
3965 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3966 && !(h->root.u.def.section->flags & SEC_CODE))
3967 return elf_x86_64_need_pic (info, input_bfd, input_section,
3968 h, NULL, NULL, howto);
3969 /* Fall through. */
3970
3971 default:
3972 _bfd_error_handler
3973 /* xgettext:c-format */
3974 (_("%pB(%pA+%#" PRIx64 "): "
3975 "unresolvable %s relocation against symbol `%s'"),
3976 input_bfd,
3977 input_section,
3978 (uint64_t) rel->r_offset,
3979 howto->name,
3980 h->root.root.string);
3981 return FALSE;
3982 }
3983 }
3984
3985 do_relocation:
3986 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3987 contents, rel->r_offset,
3988 relocation, rel->r_addend);
3989
3990 check_relocation_error:
3991 if (r != bfd_reloc_ok)
3992 {
3993 const char *name;
3994
3995 if (h != NULL)
3996 name = h->root.root.string;
3997 else
3998 {
3999 name = bfd_elf_string_from_elf_section (input_bfd,
4000 symtab_hdr->sh_link,
4001 sym->st_name);
4002 if (name == NULL)
4003 return FALSE;
4004 if (*name == '\0')
4005 name = bfd_section_name (input_bfd, sec);
4006 }
4007
4008 if (r == bfd_reloc_overflow)
4009 {
4010 if (converted_reloc)
4011 {
4012 info->callbacks->einfo
4013 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4014 return FALSE;
4015 }
4016 (*info->callbacks->reloc_overflow)
4017 (info, (h ? &h->root : NULL), name, howto->name,
4018 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4019 }
4020 else
4021 {
4022 _bfd_error_handler
4023 /* xgettext:c-format */
4024 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4025 input_bfd, input_section,
4026 (uint64_t) rel->r_offset, name, (int) r);
4027 return FALSE;
4028 }
4029 }
4030
4031 if (wrel != rel)
4032 *wrel = *rel;
4033 }
4034
4035 if (wrel != rel)
4036 {
4037 Elf_Internal_Shdr *rel_hdr;
4038 size_t deleted = rel - wrel;
4039
4040 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4041 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4042 if (rel_hdr->sh_size == 0)
4043 {
4044 /* It is too late to remove an empty reloc section. Leave
4045 one NONE reloc.
4046 ??? What is wrong with an empty section??? */
4047 rel_hdr->sh_size = rel_hdr->sh_entsize;
4048 deleted -= 1;
4049 }
4050 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4051 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4052 input_section->reloc_count -= deleted;
4053 }
4054
4055 return TRUE;
4056 }
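/* Editorial sketch, not part of the original file: the GD->IE and
   GDesc->IE rewrites above, and the PLT fix-ups further below, all store
   a signed 32-bit PC-relative displacement.  On x86-64 such a
   displacement is measured from the address of the *next* instruction,
   which is what the "- 12 - largepic" term in the GD->IE case computes.
   The hypothetical helper below illustrates that arithmetic together
   with the "(disp + 0x80000000) > 0xffffffff" style of overflow check
   used elsewhere in this file.  */

static bfd_boolean ATTRIBUTE_UNUSED
elf_x86_64_example_pcrel32 (bfd_vma target, bfd_vma insn_end_vma,
			    bfd_vma *disp)
{
  /* PC-relative addressing is relative to the first byte after the
     instruction containing the 32-bit field.  */
  *disp = target - insn_end_vma;

  /* Adding 0x80000000 maps the valid signed 32-bit range
     [-0x80000000, 0x7fffffff] onto [0, 0xffffffff]; anything larger
     does not fit in the field.  */
  return (*disp + 0x80000000) <= 0xffffffff;
}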
4057
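/* Editorial sketch, not part of the original file: each TLS transition
   above patches a fixed-size instruction window that starts a few bytes
   before rel->r_offset, so the code first checks that the whole window
   lies inside the input section and jumps to corrupt_input otherwise
   (the 64-bit GD->IE rewrite, for instance, uses a 3- or 4-byte prefix
   and a 22- or 16-byte patch).  A hypothetical helper expressing the
   same guard:  */

static bfd_boolean ATTRIBUTE_UNUSED
elf_x86_64_example_patch_in_bounds (bfd_vma r_offset, bfd_vma prefix,
				    bfd_vma patch_size, bfd_vma sec_size)
{
  /* The patch begins PREFIX bytes before the relocated field and is
     PATCH_SIZE bytes long; both ends must stay within the section.  */
  return r_offset >= prefix && r_offset - prefix + patch_size <= sec_size;
}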
4058 /* Finish up dynamic symbol handling. We set the contents of various
4059 dynamic sections here. */
4060
4061 static bfd_boolean
4062 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4063 struct bfd_link_info *info,
4064 struct elf_link_hash_entry *h,
4065 Elf_Internal_Sym *sym)
4066 {
4067 struct elf_x86_link_hash_table *htab;
4068 bfd_boolean use_plt_second;
4069 struct elf_x86_link_hash_entry *eh;
4070 bfd_boolean local_undefweak;
4071
4072 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4073 if (htab == NULL)
4074 return FALSE;
4075
4076 /* Use the second PLT section only if there is a .plt section. */
4077 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4078
4079 eh = (struct elf_x86_link_hash_entry *) h;
4080 if (eh->no_finish_dynamic_symbol)
4081 abort ();
4082
4083 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4084 resolved undefined weak symbols in an executable so that their
4085 references have value 0 at run-time. */
4086 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4087
4088 if (h->plt.offset != (bfd_vma) -1)
4089 {
4090 bfd_vma plt_index;
4091 bfd_vma got_offset, plt_offset;
4092 Elf_Internal_Rela rela;
4093 bfd_byte *loc;
4094 asection *plt, *gotplt, *relplt, *resolved_plt;
4095 const struct elf_backend_data *bed;
4096 bfd_vma plt_got_pcrel_offset;
4097
4098 /* When building a static executable, use .iplt, .igot.plt and
4099 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4100 if (htab->elf.splt != NULL)
4101 {
4102 plt = htab->elf.splt;
4103 gotplt = htab->elf.sgotplt;
4104 relplt = htab->elf.srelplt;
4105 }
4106 else
4107 {
4108 plt = htab->elf.iplt;
4109 gotplt = htab->elf.igotplt;
4110 relplt = htab->elf.irelplt;
4111 }
4112
4113 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4114
4115 /* Get the index in the procedure linkage table which
4116 corresponds to this symbol. This is the index of this symbol
4117 in all the symbols for which we are making plt entries. The
4118 first entry in the procedure linkage table is reserved.
4119
4120 Get the offset into the .got table of the entry that
4121 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4122 bytes. The first three are reserved for the dynamic linker.
4123
4124 For static executables, we don't reserve anything. */
4125
4126 if (plt == htab->elf.splt)
4127 {
4128 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4129 - htab->plt.has_plt0);
4130 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4131 }
4132 else
4133 {
4134 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4135 got_offset = got_offset * GOT_ENTRY_SIZE;
4136 }
4137
4138 /* Fill in the entry in the procedure linkage table. */
4139 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4140 htab->plt.plt_entry_size);
4141 if (use_plt_second)
4142 {
4143 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4144 htab->non_lazy_plt->plt_entry,
4145 htab->non_lazy_plt->plt_entry_size);
4146
4147 resolved_plt = htab->plt_second;
4148 plt_offset = eh->plt_second.offset;
4149 }
4150 else
4151 {
4152 resolved_plt = plt;
4153 plt_offset = h->plt.offset;
4154 }
4155
4156 /* Insert the relocation positions of the plt section. */
4157
4158 /* Put the offset of the PC-relative instruction referring to the GOT entry,
4159 subtracting the size of that instruction. */
4160 plt_got_pcrel_offset = (gotplt->output_section->vma
4161 + gotplt->output_offset
4162 + got_offset
4163 - resolved_plt->output_section->vma
4164 - resolved_plt->output_offset
4165 - plt_offset
4166 - htab->plt.plt_got_insn_size);
4167
4168 /* Check PC-relative offset overflow in PLT entry. */
4169 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4170 /* xgettext:c-format */
4171 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4172 output_bfd, h->root.root.string);
4173
4174 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4175 (resolved_plt->contents + plt_offset
4176 + htab->plt.plt_got_offset));
4177
4178 /* Fill in the entry in the global offset table; initially this
4179 points to the second part of the PLT entry. Leave the entry
4180 as zero for an undefined weak symbol in PIE; no PLT relocation
4181 is emitted against an undefined weak symbol in PIE. */
4182 if (!local_undefweak)
4183 {
4184 if (htab->plt.has_plt0)
4185 bfd_put_64 (output_bfd, (plt->output_section->vma
4186 + plt->output_offset
4187 + h->plt.offset
4188 + htab->lazy_plt->plt_lazy_offset),
4189 gotplt->contents + got_offset);
4190
4191 /* Fill in the entry in the .rela.plt section. */
4192 rela.r_offset = (gotplt->output_section->vma
4193 + gotplt->output_offset
4194 + got_offset);
4195 if (PLT_LOCAL_IFUNC_P (info, h))
4196 {
4197 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4198 h->root.root.string,
4199 h->root.u.def.section->owner);
4200
4201 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4202 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4203 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4204 rela.r_addend = (h->root.u.def.value
4205 + h->root.u.def.section->output_section->vma
4206 + h->root.u.def.section->output_offset);
4207 /* R_X86_64_IRELATIVE comes last. */
4208 plt_index = htab->next_irelative_index--;
4209 }
4210 else
4211 {
4212 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4213 rela.r_addend = 0;
4214 plt_index = htab->next_jump_slot_index++;
4215 }
4216
4217 /* Don't fill the second and third slots of the PLT entry for
4218 static executables or when there is no PLT0. */
4219 if (plt == htab->elf.splt && htab->plt.has_plt0)
4220 {
4221 bfd_vma plt0_offset
4222 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4223
4224 /* Put relocation index. */
4225 bfd_put_32 (output_bfd, plt_index,
4226 (plt->contents + h->plt.offset
4227 + htab->lazy_plt->plt_reloc_offset));
4228
4229 /* Put offset for jmp .PLT0 and check for overflow. We don't
4230 check relocation index for overflow since branch displacement
4231 will overflow first. */
4232 if (plt0_offset > 0x80000000)
4233 /* xgettext:c-format */
4234 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4235 output_bfd, h->root.root.string);
4236 bfd_put_32 (output_bfd, - plt0_offset,
4237 (plt->contents + h->plt.offset
4238 + htab->lazy_plt->plt_plt_offset));
4239 }
4240
4241 bed = get_elf_backend_data (output_bfd);
4242 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4243 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4244 }
4245 }
4246 else if (eh->plt_got.offset != (bfd_vma) -1)
4247 {
4248 bfd_vma got_offset, plt_offset;
4249 asection *plt, *got;
4250 bfd_boolean got_after_plt;
4251 int32_t got_pcrel_offset;
4252
4253 /* Set the entry in the GOT procedure linkage table. */
4254 plt = htab->plt_got;
4255 got = htab->elf.sgot;
4256 got_offset = h->got.offset;
4257
4258 if (got_offset == (bfd_vma) -1
4259 || (h->type == STT_GNU_IFUNC && h->def_regular)
4260 || plt == NULL
4261 || got == NULL)
4262 abort ();
4263
4264 /* Use the non-lazy PLT entry template for the GOT PLT since they
4265 are identical. */
4266 /* Fill in the entry in the GOT procedure linkage table. */
4267 plt_offset = eh->plt_got.offset;
4268 memcpy (plt->contents + plt_offset,
4269 htab->non_lazy_plt->plt_entry,
4270 htab->non_lazy_plt->plt_entry_size);
4271
4272 /* Put the offset of the PC-relative instruction referring to the GOT
4273 entry, subtracting the size of that instruction. */
4274 got_pcrel_offset = (got->output_section->vma
4275 + got->output_offset
4276 + got_offset
4277 - plt->output_section->vma
4278 - plt->output_offset
4279 - plt_offset
4280 - htab->non_lazy_plt->plt_got_insn_size);
4281
4282 /* Check PC-relative offset overflow in GOT PLT entry. */
4283 got_after_plt = got->output_section->vma > plt->output_section->vma;
4284 if ((got_after_plt && got_pcrel_offset < 0)
4285 || (!got_after_plt && got_pcrel_offset > 0))
4286 /* xgettext:c-format */
4287 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4288 output_bfd, h->root.root.string);
4289
4290 bfd_put_32 (output_bfd, got_pcrel_offset,
4291 (plt->contents + plt_offset
4292 + htab->non_lazy_plt->plt_got_offset));
4293 }
4294
4295 if (!local_undefweak
4296 && !h->def_regular
4297 && (h->plt.offset != (bfd_vma) -1
4298 || eh->plt_got.offset != (bfd_vma) -1))
4299 {
4300 /* Mark the symbol as undefined, rather than as defined in
4301 the .plt section. Leave the value if there were any
4302 relocations where pointer equality matters (this is a clue
4303 for the dynamic linker, to make function pointer
4304 comparisons work between an application and shared
4305 library), otherwise set it to zero. If a function is only
4306 called from a binary, there is no need to slow down
4307 shared libraries because of that. */
4308 sym->st_shndx = SHN_UNDEF;
4309 if (!h->pointer_equality_needed)
4310 sym->st_value = 0;
4311 }
4312
4313 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4314
4315 /* Don't generate a dynamic GOT relocation against an undefined weak
4316 symbol in an executable. */
4317 if (h->got.offset != (bfd_vma) -1
4318 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4319 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4320 && !local_undefweak)
4321 {
4322 Elf_Internal_Rela rela;
4323 asection *relgot = htab->elf.srelgot;
4324
4325 /* This symbol has an entry in the global offset table. Set it
4326 up. */
4327 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4328 abort ();
4329
4330 rela.r_offset = (htab->elf.sgot->output_section->vma
4331 + htab->elf.sgot->output_offset
4332 + (h->got.offset &~ (bfd_vma) 1));
4333
4334 /* If this is a static link, or it is a -Bsymbolic link and the
4335 symbol is defined locally or was forced to be local because
4336 of a version file, we just want to emit a RELATIVE reloc.
4337 The entry in the global offset table will already have been
4338 initialized in the relocate_section function. */
4339 if (h->def_regular
4340 && h->type == STT_GNU_IFUNC)
4341 {
4342 if (h->plt.offset == (bfd_vma) -1)
4343 {
4344 /* STT_GNU_IFUNC is referenced without PLT. */
4345 if (htab->elf.splt == NULL)
4346 {
4347 /* Use the .rel[a].iplt section to store .got relocations
4348 in a static executable. */
4349 relgot = htab->elf.irelplt;
4350 }
4351 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4352 {
4353 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4354 h->root.root.string,
4355 h->root.u.def.section->owner);
4356
4357 rela.r_info = htab->r_info (0,
4358 R_X86_64_IRELATIVE);
4359 rela.r_addend = (h->root.u.def.value
4360 + h->root.u.def.section->output_section->vma
4361 + h->root.u.def.section->output_offset);
4362 }
4363 else
4364 goto do_glob_dat;
4365 }
4366 else if (bfd_link_pic (info))
4367 {
4368 /* Generate R_X86_64_GLOB_DAT. */
4369 goto do_glob_dat;
4370 }
4371 else
4372 {
4373 asection *plt;
4374 bfd_vma plt_offset;
4375
4376 if (!h->pointer_equality_needed)
4377 abort ();
4378
4379 /* For a non-shared object, we can't use .got.plt, which
4380 contains the real function address, when we need pointer
4381 equality. We load the GOT entry with the PLT entry instead. */
4382 if (htab->plt_second != NULL)
4383 {
4384 plt = htab->plt_second;
4385 plt_offset = eh->plt_second.offset;
4386 }
4387 else
4388 {
4389 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4390 plt_offset = h->plt.offset;
4391 }
4392 bfd_put_64 (output_bfd, (plt->output_section->vma
4393 + plt->output_offset
4394 + plt_offset),
4395 htab->elf.sgot->contents + h->got.offset);
4396 return TRUE;
4397 }
4398 }
4399 else if (bfd_link_pic (info)
4400 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4401 {
4402 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4403 return FALSE;
4404 BFD_ASSERT((h->got.offset & 1) != 0);
4405 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4406 rela.r_addend = (h->root.u.def.value
4407 + h->root.u.def.section->output_section->vma
4408 + h->root.u.def.section->output_offset);
4409 }
4410 else
4411 {
4412 BFD_ASSERT((h->got.offset & 1) == 0);
4413 do_glob_dat:
4414 bfd_put_64 (output_bfd, (bfd_vma) 0,
4415 htab->elf.sgot->contents + h->got.offset);
4416 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4417 rela.r_addend = 0;
4418 }
4419
4420 elf_append_rela (output_bfd, relgot, &rela);
4421 }
4422
4423 if (h->needs_copy)
4424 {
4425 Elf_Internal_Rela rela;
4426 asection *s;
4427
4428 /* This symbol needs a copy reloc. Set it up. */
4429 VERIFY_COPY_RELOC (h, htab)
4430
4431 rela.r_offset = (h->root.u.def.value
4432 + h->root.u.def.section->output_section->vma
4433 + h->root.u.def.section->output_offset);
4434 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4435 rela.r_addend = 0;
4436 if (h->root.u.def.section == htab->elf.sdynrelro)
4437 s = htab->elf.sreldynrelro;
4438 else
4439 s = htab->elf.srelbss;
4440 elf_append_rela (output_bfd, s, &rela);
4441 }
4442
4443 return TRUE;
4444 }
4445
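/* Editorial sketch, not part of the original file: the .got.plt offset
   computed in elf_x86_64_finish_dynamic_symbol above follows directly
   from the PLT index.  For the normal .plt, PLT0 (when present) is
   skipped and the first three .got.plt slots are reserved for the
   dynamic linker; the static-executable .iplt/.igot.plt reserve
   nothing.  The helper name and parameters below are hypothetical.  */

static bfd_vma ATTRIBUTE_UNUSED
elf_x86_64_example_got_plt_offset (bfd_vma plt_offset,
				   bfd_vma plt_entry_size,
				   bfd_boolean has_plt0,
				   bfd_boolean is_iplt)
{
  bfd_vma plt_index = plt_offset / plt_entry_size;

  if (is_iplt)
    /* .igot.plt has no reserved entries.  */
    return plt_index * GOT_ENTRY_SIZE;

  /* Skip PLT0, then skip the three reserved .got.plt entries.  */
  return (plt_index - (has_plt0 ? 1 : 0) + 3) * GOT_ENTRY_SIZE;
}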
4446 /* Finish up local dynamic symbol handling. We set the contents of
4447 various dynamic sections here. */
4448
4449 static bfd_boolean
4450 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4451 {
4452 struct elf_link_hash_entry *h
4453 = (struct elf_link_hash_entry *) *slot;
4454 struct bfd_link_info *info
4455 = (struct bfd_link_info *) inf;
4456
4457 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4458 info, h, NULL);
4459 }
4460
4461 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4462 here since an undefined weak symbol may not be dynamic, in which case
4463 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4464
4465 static bfd_boolean
4466 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4467 void *inf)
4468 {
4469 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4470 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4471
4472 if (h->root.type != bfd_link_hash_undefweak
4473 || h->dynindx != -1)
4474 return TRUE;
4475
4476 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4477 info, h, NULL);
4478 }
4479
4480 /* Used to decide how to sort relocs in an optimal manner for the
4481 dynamic linker, before writing them out. */
4482
4483 static enum elf_reloc_type_class
4484 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4485 const asection *rel_sec ATTRIBUTE_UNUSED,
4486 const Elf_Internal_Rela *rela)
4487 {
4488 bfd *abfd = info->output_bfd;
4489 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4490 struct elf_x86_link_hash_table *htab
4491 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4492
4493 if (htab->elf.dynsym != NULL
4494 && htab->elf.dynsym->contents != NULL)
4495 {
4496 /* Check relocation against STT_GNU_IFUNC symbol if there are
4497 dynamic symbols. */
4498 unsigned long r_symndx = htab->r_sym (rela->r_info);
4499 if (r_symndx != STN_UNDEF)
4500 {
4501 Elf_Internal_Sym sym;
4502 if (!bed->s->swap_symbol_in (abfd,
4503 (htab->elf.dynsym->contents
4504 + r_symndx * bed->s->sizeof_sym),
4505 0, &sym))
4506 abort ();
4507
4508 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4509 return reloc_class_ifunc;
4510 }
4511 }
4512
4513 switch ((int) ELF32_R_TYPE (rela->r_info))
4514 {
4515 case R_X86_64_IRELATIVE:
4516 return reloc_class_ifunc;
4517 case R_X86_64_RELATIVE:
4518 case R_X86_64_RELATIVE64:
4519 return reloc_class_relative;
4520 case R_X86_64_JUMP_SLOT:
4521 return reloc_class_plt;
4522 case R_X86_64_COPY:
4523 return reloc_class_copy;
4524 default:
4525 return reloc_class_normal;
4526 }
4527 }
4528
4529 /* Finish up the dynamic sections. */
4530
4531 static bfd_boolean
4532 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4533 struct bfd_link_info *info)
4534 {
4535 struct elf_x86_link_hash_table *htab;
4536
4537 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4538 if (htab == NULL)
4539 return FALSE;
4540
4541 if (! htab->elf.dynamic_sections_created)
4542 return TRUE;
4543
4544 if (htab->elf.splt && htab->elf.splt->size > 0)
4545 {
4546 elf_section_data (htab->elf.splt->output_section)
4547 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4548
4549 if (htab->plt.has_plt0)
4550 {
4551 /* Fill in the special first entry in the procedure linkage
4552 table. */
4553 memcpy (htab->elf.splt->contents,
4554 htab->lazy_plt->plt0_entry,
4555 htab->lazy_plt->plt0_entry_size);
4556 /* Add the offset for pushq GOT+8(%rip); since the instruction
4557 uses 6 bytes, subtract this value. */
4558 bfd_put_32 (output_bfd,
4559 (htab->elf.sgotplt->output_section->vma
4560 + htab->elf.sgotplt->output_offset
4561 + 8
4562 - htab->elf.splt->output_section->vma
4563 - htab->elf.splt->output_offset
4564 - 6),
4565 (htab->elf.splt->contents
4566 + htab->lazy_plt->plt0_got1_offset));
4567 /* Add offset for the PC-relative instruction accessing
4568 GOT+16, subtracting the offset to the end of that
4569 instruction. */
4570 bfd_put_32 (output_bfd,
4571 (htab->elf.sgotplt->output_section->vma
4572 + htab->elf.sgotplt->output_offset
4573 + 16
4574 - htab->elf.splt->output_section->vma
4575 - htab->elf.splt->output_offset
4576 - htab->lazy_plt->plt0_got2_insn_end),
4577 (htab->elf.splt->contents
4578 + htab->lazy_plt->plt0_got2_offset));
4579 }
4580
4581 if (htab->tlsdesc_plt)
4582 {
4583 bfd_put_64 (output_bfd, (bfd_vma) 0,
4584 htab->elf.sgot->contents + htab->tlsdesc_got);
4585
4586 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4587 htab->lazy_plt->plt_tlsdesc_entry,
4588 htab->lazy_plt->plt_tlsdesc_entry_size);
4589
4590 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4591 bytes and the instruction uses 6 bytes, subtract these
4592 values. */
4593 bfd_put_32 (output_bfd,
4594 (htab->elf.sgotplt->output_section->vma
4595 + htab->elf.sgotplt->output_offset
4596 + 8
4597 - htab->elf.splt->output_section->vma
4598 - htab->elf.splt->output_offset
4599 - htab->tlsdesc_plt
4600 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4601 (htab->elf.splt->contents
4602 + htab->tlsdesc_plt
4603 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4604 /* Add offset for indirect branch via GOT+TDG, where TDG
4605 stands for htab->tlsdesc_got, subtracting the offset
4606 to the end of that instruction. */
4607 bfd_put_32 (output_bfd,
4608 (htab->elf.sgot->output_section->vma
4609 + htab->elf.sgot->output_offset
4610 + htab->tlsdesc_got
4611 - htab->elf.splt->output_section->vma
4612 - htab->elf.splt->output_offset
4613 - htab->tlsdesc_plt
4614 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4615 (htab->elf.splt->contents
4616 + htab->tlsdesc_plt
4617 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4618 }
4619 }
4620
4621 /* Fill PLT entries for undefined weak symbols in PIE. */
4622 if (bfd_link_pie (info))
4623 bfd_hash_traverse (&info->hash->table,
4624 elf_x86_64_pie_finish_undefweak_symbol,
4625 info);
4626
4627 return TRUE;
4628 }
4629
4630 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4631 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4632 It has to be done before elf_link_sort_relocs is called so that
4633 dynamic relocations are properly sorted. */
4634
4635 static bfd_boolean
4636 elf_x86_64_output_arch_local_syms
4637 (bfd *output_bfd ATTRIBUTE_UNUSED,
4638 struct bfd_link_info *info,
4639 void *flaginfo ATTRIBUTE_UNUSED,
4640 int (*func) (void *, const char *,
4641 Elf_Internal_Sym *,
4642 asection *,
4643 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4644 {
4645 struct elf_x86_link_hash_table *htab
4646 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4647 if (htab == NULL)
4648 return FALSE;
4649
4650 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4651 htab_traverse (htab->loc_hash_table,
4652 elf_x86_64_finish_local_dynamic_symbol,
4653 info);
4654
4655 return TRUE;
4656 }
4657
4658 /* Forward declaration. */
4659 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4660
4661 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4662 dynamic relocations. */
4663
4664 static long
4665 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4666 long symcount ATTRIBUTE_UNUSED,
4667 asymbol **syms ATTRIBUTE_UNUSED,
4668 long dynsymcount,
4669 asymbol **dynsyms,
4670 asymbol **ret)
4671 {
4672 long count, i, n;
4673 int j;
4674 bfd_byte *plt_contents;
4675 long relsize;
4676 const struct elf_x86_lazy_plt_layout *lazy_plt;
4677 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4678 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4679 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4680 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4681 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4682 asection *plt;
4683 enum elf_x86_plt_type plt_type;
4684 struct elf_x86_plt plts[] =
4685 {
4686 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4687 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4688 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4689 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4690 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4691 };
4692
4693 *ret = NULL;
4694
4695 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4696 return 0;
4697
4698 if (dynsymcount <= 0)
4699 return 0;
4700
4701 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4702 if (relsize <= 0)
4703 return -1;
4704
4705 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4706 {
4707 lazy_plt = &elf_x86_64_lazy_plt;
4708 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4709 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4710 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4711 if (ABI_64_P (abfd))
4712 {
4713 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4714 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4715 }
4716 else
4717 {
4718 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4719 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4720 }
4721 }
4722 else
4723 {
4724 lazy_plt = &elf_x86_64_nacl_plt;
4725 non_lazy_plt = NULL;
4726 lazy_bnd_plt = NULL;
4727 non_lazy_bnd_plt = NULL;
4728 lazy_ibt_plt = NULL;
4729 non_lazy_ibt_plt = NULL;
4730 }
4731
4732 count = 0;
4733 for (j = 0; plts[j].name != NULL; j++)
4734 {
4735 plt = bfd_get_section_by_name (abfd, plts[j].name);
4736 if (plt == NULL || plt->size == 0)
4737 continue;
4738
4739 /* Get the PLT section contents. */
4740 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4741 if (plt_contents == NULL)
4742 break;
4743 if (!bfd_get_section_contents (abfd, (asection *) plt,
4744 plt_contents, 0, plt->size))
4745 {
4746 free (plt_contents);
4747 break;
4748 }
4749
4750 /* Check what kind of PLT it is. */
4751 plt_type = plt_unknown;
4752 if (plts[j].type == plt_unknown
4753 && (plt->size >= (lazy_plt->plt_entry_size
4754 + lazy_plt->plt_entry_size)))
4755 {
4756 /* Match lazy PLT first. Need to check the first two
4757 instructions. */
4758 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4759 lazy_plt->plt0_got1_offset) == 0)
4760 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4761 2) == 0))
4762 plt_type = plt_lazy;
4763 else if (lazy_bnd_plt != NULL
4764 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4765 lazy_bnd_plt->plt0_got1_offset) == 0)
4766 && (memcmp (plt_contents + 6,
4767 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4768 {
4769 plt_type = plt_lazy | plt_second;
4770 /* The first entry in the lazy IBT PLT is the same as in the
4771 lazy BND PLT. */
4772 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4773 lazy_ibt_plt->plt_entry,
4774 lazy_ibt_plt->plt_got_offset) == 0))
4775 lazy_plt = lazy_ibt_plt;
4776 else
4777 lazy_plt = lazy_bnd_plt;
4778 }
4779 }
4780
4781 if (non_lazy_plt != NULL
4782 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4783 && plt->size >= non_lazy_plt->plt_entry_size)
4784 {
4785 /* Match non-lazy PLT. */
4786 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4787 non_lazy_plt->plt_got_offset) == 0)
4788 plt_type = plt_non_lazy;
4789 }
4790
4791 if (plt_type == plt_unknown || plt_type == plt_second)
4792 {
4793 if (non_lazy_bnd_plt != NULL
4794 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4795 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4796 non_lazy_bnd_plt->plt_got_offset) == 0))
4797 {
4798 /* Match BND PLT. */
4799 plt_type = plt_second;
4800 non_lazy_plt = non_lazy_bnd_plt;
4801 }
4802 else if (non_lazy_ibt_plt != NULL
4803 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4804 && (memcmp (plt_contents,
4805 non_lazy_ibt_plt->plt_entry,
4806 non_lazy_ibt_plt->plt_got_offset) == 0))
4807 {
4808 /* Match IBT PLT. */
4809 plt_type = plt_second;
4810 non_lazy_plt = non_lazy_ibt_plt;
4811 }
4812 }
4813
4814 if (plt_type == plt_unknown)
4815 {
4816 free (plt_contents);
4817 continue;
4818 }
4819
4820 plts[j].sec = plt;
4821 plts[j].type = plt_type;
4822
4823 if ((plt_type & plt_lazy))
4824 {
4825 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4826 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4827 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4828 /* Skip PLT0 in lazy PLT. */
4829 i = 1;
4830 }
4831 else
4832 {
4833 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4834 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4835 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4836 i = 0;
4837 }
4838
4839 /* Skip lazy PLT when the second PLT is used. */
4840 if (plt_type == (plt_lazy | plt_second))
4841 plts[j].count = 0;
4842 else
4843 {
4844 n = plt->size / plts[j].plt_entry_size;
4845 plts[j].count = n;
4846 count += n - i;
4847 }
4848
4849 plts[j].contents = plt_contents;
4850 }
4851
4852 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4853 (bfd_vma) 0, plts, dynsyms,
4854 ret);
4855 }
4856
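/* Editorial sketch, not part of the original file: the per-section
   contribution to COUNT above.  A lazy PLT reserves its first slot for
   PLT0, and a lazy PLT that is paired with a second PLT contributes no
   synthetic symbols of its own (they are generated from .plt.sec or
   .plt.bnd instead).  Names below are hypothetical.  */

static long ATTRIBUTE_UNUSED
elf_x86_64_example_count_plt_syms (bfd_vma plt_size, bfd_vma plt_entry_size,
				   bfd_boolean is_lazy,
				   bfd_boolean has_second_plt)
{
  long n = plt_size / plt_entry_size;

  if (is_lazy && has_second_plt)
    /* The entries are described by the second PLT.  */
    return 0;

  /* Skip PLT0 in a lazy PLT.  */
  return is_lazy ? n - 1 : n;
}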
4857 /* Handle an x86-64 specific section when reading an object file. This
4858 is called when elfcode.h finds a section with an unknown type. */
4859
4860 static bfd_boolean
4861 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4862 const char *name, int shindex)
4863 {
4864 if (hdr->sh_type != SHT_X86_64_UNWIND)
4865 return FALSE;
4866
4867 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4868 return FALSE;
4869
4870 return TRUE;
4871 }
4872
4873 /* Hook called by the linker routine which adds symbols from an object
4874 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4875 of .bss. */
4876
4877 static bfd_boolean
4878 elf_x86_64_add_symbol_hook (bfd *abfd,
4879 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4880 Elf_Internal_Sym *sym,
4881 const char **namep ATTRIBUTE_UNUSED,
4882 flagword *flagsp ATTRIBUTE_UNUSED,
4883 asection **secp,
4884 bfd_vma *valp)
4885 {
4886 asection *lcomm;
4887
4888 switch (sym->st_shndx)
4889 {
4890 case SHN_X86_64_LCOMMON:
4891 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4892 if (lcomm == NULL)
4893 {
4894 lcomm = bfd_make_section_with_flags (abfd,
4895 "LARGE_COMMON",
4896 (SEC_ALLOC
4897 | SEC_IS_COMMON
4898 | SEC_LINKER_CREATED));
4899 if (lcomm == NULL)
4900 return FALSE;
4901 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4902 }
4903 *secp = lcomm;
4904 *valp = sym->st_size;
4905 return TRUE;
4906 }
4907
4908 return TRUE;
4909 }
4910
4911
4912 /* Given a BFD section, try to locate the corresponding ELF section
4913 index. */
4914
4915 static bfd_boolean
4916 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4917 asection *sec, int *index_return)
4918 {
4919 if (sec == &_bfd_elf_large_com_section)
4920 {
4921 *index_return = SHN_X86_64_LCOMMON;
4922 return TRUE;
4923 }
4924 return FALSE;
4925 }
4926
4927 /* Process a symbol. */
4928
4929 static void
4930 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4931 asymbol *asym)
4932 {
4933 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4934
4935 switch (elfsym->internal_elf_sym.st_shndx)
4936 {
4937 case SHN_X86_64_LCOMMON:
4938 asym->section = &_bfd_elf_large_com_section;
4939 asym->value = elfsym->internal_elf_sym.st_size;
4940 /* Common symbol doesn't set BSF_GLOBAL. */
4941 asym->flags &= ~BSF_GLOBAL;
4942 break;
4943 }
4944 }
4945
4946 static bfd_boolean
4947 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4948 {
4949 return (sym->st_shndx == SHN_COMMON
4950 || sym->st_shndx == SHN_X86_64_LCOMMON);
4951 }
4952
4953 static unsigned int
4954 elf_x86_64_common_section_index (asection *sec)
4955 {
4956 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4957 return SHN_COMMON;
4958 else
4959 return SHN_X86_64_LCOMMON;
4960 }
4961
4962 static asection *
4963 elf_x86_64_common_section (asection *sec)
4964 {
4965 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4966 return bfd_com_section_ptr;
4967 else
4968 return &_bfd_elf_large_com_section;
4969 }
4970
4971 static bfd_boolean
4972 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4973 const Elf_Internal_Sym *sym,
4974 asection **psec,
4975 bfd_boolean newdef,
4976 bfd_boolean olddef,
4977 bfd *oldbfd,
4978 const asection *oldsec)
4979 {
4980 /* A normal common symbol and a large common symbol result in a
4981 normal common symbol. We turn the large common symbol into a
4982 normal one. */
4983 if (!olddef
4984 && h->root.type == bfd_link_hash_common
4985 && !newdef
4986 && bfd_is_com_section (*psec)
4987 && oldsec != *psec)
4988 {
4989 if (sym->st_shndx == SHN_COMMON
4990 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4991 {
4992 h->root.u.c.p->section
4993 = bfd_make_section_old_way (oldbfd, "COMMON");
4994 h->root.u.c.p->section->flags = SEC_ALLOC;
4995 }
4996 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4997 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4998 *psec = bfd_com_section_ptr;
4999 }
5000
5001 return TRUE;
5002 }
5003
5004 static int
5005 elf_x86_64_additional_program_headers (bfd *abfd,
5006 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5007 {
5008 asection *s;
5009 int count = 0;
5010
5011 /* Check to see if we need a large readonly segment. */
5012 s = bfd_get_section_by_name (abfd, ".lrodata");
5013 if (s && (s->flags & SEC_LOAD))
5014 count++;
5015
5016 /* Check to see if we need a large data segment. Since the .lbss section
5017 is placed right after the .bss section, there should be no need for
5018 a large data segment just because of .lbss. */
5019 s = bfd_get_section_by_name (abfd, ".ldata");
5020 if (s && (s->flags & SEC_LOAD))
5021 count++;
5022
5023 return count;
5024 }
5025
5026 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5027
5028 static bfd_boolean
5029 elf_x86_64_relocs_compatible (const bfd_target *input,
5030 const bfd_target *output)
5031 {
5032 return ((xvec_get_elf_backend_data (input)->s->elfclass
5033 == xvec_get_elf_backend_data (output)->s->elfclass)
5034 && _bfd_elf_relocs_compatible (input, output));
5035 }
5036
5037 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5038 with GNU properties if found. Otherwise, return NULL. */
5039
5040 static bfd *
5041 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5042 {
5043 struct elf_x86_init_table init_table;
5044
5045 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5046 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5047 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5048 != (int) R_X86_64_GNU_VTINHERIT)
5049 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5050 != (int) R_X86_64_GNU_VTENTRY))
5051 abort ();
5052
5053 /* This is unused for x86-64. */
5054 init_table.plt0_pad_byte = 0x90;
5055
5056 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5057 {
5058 const struct elf_backend_data *bed
5059 = get_elf_backend_data (info->output_bfd);
5060 struct elf_x86_link_hash_table *htab
5061 = elf_x86_hash_table (info, bed->target_id);
5062 if (!htab)
5063 abort ();
5064 if (htab->params->bndplt)
5065 {
5066 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5067 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5068 }
5069 else
5070 {
5071 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5072 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5073 }
5074
5075 if (ABI_64_P (info->output_bfd))
5076 {
5077 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5078 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5079 }
5080 else
5081 {
5082 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5083 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5084 }
5085 }
5086 else
5087 {
5088 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5089 init_table.non_lazy_plt = NULL;
5090 init_table.lazy_ibt_plt = NULL;
5091 init_table.non_lazy_ibt_plt = NULL;
5092 }
5093
5094 if (ABI_64_P (info->output_bfd))
5095 {
5096 init_table.r_info = elf64_r_info;
5097 init_table.r_sym = elf64_r_sym;
5098 }
5099 else
5100 {
5101 init_table.r_info = elf32_r_info;
5102 init_table.r_sym = elf32_r_sym;
5103 }
5104
5105 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5106 }
5107
5108 static const struct bfd_elf_special_section
5109 elf_x86_64_special_sections[]=
5110 {
5111 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5112 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5113 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5114 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5115 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5116 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5117 { NULL, 0, 0, 0, 0 }
5118 };
5119
5120 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5121 #define TARGET_LITTLE_NAME "elf64-x86-64"
5122 #define ELF_ARCH bfd_arch_i386
5123 #define ELF_TARGET_ID X86_64_ELF_DATA
5124 #define ELF_MACHINE_CODE EM_X86_64
5125 #if DEFAULT_LD_Z_SEPARATE_CODE
5126 # define ELF_MAXPAGESIZE 0x1000
5127 #else
5128 # define ELF_MAXPAGESIZE 0x200000
5129 #endif
5130 #define ELF_MINPAGESIZE 0x1000
5131 #define ELF_COMMONPAGESIZE 0x1000
5132
5133 #define elf_backend_can_gc_sections 1
5134 #define elf_backend_can_refcount 1
5135 #define elf_backend_want_got_plt 1
5136 #define elf_backend_plt_readonly 1
5137 #define elf_backend_want_plt_sym 0
5138 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5139 #define elf_backend_rela_normal 1
5140 #define elf_backend_plt_alignment 4
5141 #define elf_backend_extern_protected_data 1
5142 #define elf_backend_caches_rawsize 1
5143 #define elf_backend_dtrel_excludes_plt 1
5144 #define elf_backend_want_dynrelro 1
5145
5146 #define elf_info_to_howto elf_x86_64_info_to_howto
5147
5148 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5149 #define bfd_elf64_bfd_reloc_name_lookup \
5150 elf_x86_64_reloc_name_lookup
5151
5152 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5153 #define elf_backend_check_relocs elf_x86_64_check_relocs
5154 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5155 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5156 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5157 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5158 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5159 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5160 #ifdef CORE_HEADER
5161 #define elf_backend_write_core_note elf_x86_64_write_core_note
5162 #endif
5163 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5164 #define elf_backend_relocate_section elf_x86_64_relocate_section
5165 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5166 #define elf_backend_object_p elf64_x86_64_elf_object_p
5167 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5168
5169 #define elf_backend_section_from_shdr \
5170 elf_x86_64_section_from_shdr
5171
5172 #define elf_backend_section_from_bfd_section \
5173 elf_x86_64_elf_section_from_bfd_section
5174 #define elf_backend_add_symbol_hook \
5175 elf_x86_64_add_symbol_hook
5176 #define elf_backend_symbol_processing \
5177 elf_x86_64_symbol_processing
5178 #define elf_backend_common_section_index \
5179 elf_x86_64_common_section_index
5180 #define elf_backend_common_section \
5181 elf_x86_64_common_section
5182 #define elf_backend_common_definition \
5183 elf_x86_64_common_definition
5184 #define elf_backend_merge_symbol \
5185 elf_x86_64_merge_symbol
5186 #define elf_backend_special_sections \
5187 elf_x86_64_special_sections
5188 #define elf_backend_additional_program_headers \
5189 elf_x86_64_additional_program_headers
5190 #define elf_backend_setup_gnu_properties \
5191 elf_x86_64_link_setup_gnu_properties
5192 #define elf_backend_hide_symbol \
5193 _bfd_x86_elf_hide_symbol
5194
5195 #undef elf64_bed
5196 #define elf64_bed elf64_x86_64_bed
5197
5198 #include "elf64-target.h"
5199
5200 /* CloudABI support. */
5201
5202 #undef TARGET_LITTLE_SYM
5203 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5204 #undef TARGET_LITTLE_NAME
5205 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5206
5207 #undef ELF_OSABI
5208 #define ELF_OSABI ELFOSABI_CLOUDABI
5209
5210 #undef elf64_bed
5211 #define elf64_bed elf64_x86_64_cloudabi_bed
5212
5213 #include "elf64-target.h"
5214
5215 /* FreeBSD support. */
5216
5217 #undef TARGET_LITTLE_SYM
5218 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5219 #undef TARGET_LITTLE_NAME
5220 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5221
5222 #undef ELF_OSABI
5223 #define ELF_OSABI ELFOSABI_FREEBSD
5224
5225 #undef elf64_bed
5226 #define elf64_bed elf64_x86_64_fbsd_bed
5227
5228 #include "elf64-target.h"
5229
5230 /* Solaris 2 support. */
5231
5232 #undef TARGET_LITTLE_SYM
5233 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5234 #undef TARGET_LITTLE_NAME
5235 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5236
5237 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5238 {
5239 is_solaris /* os */
5240 };
5241
5242 #undef elf_backend_arch_data
5243 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5244
5245 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5246 objects won't be recognized. */
5247 #undef ELF_OSABI
5248
5249 #undef elf64_bed
5250 #define elf64_bed elf64_x86_64_sol2_bed
5251
5252 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5253 boundary. */
5254 #undef elf_backend_static_tls_alignment
5255 #define elf_backend_static_tls_alignment 16
5256
5257 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5258
5259 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5260 File, p.63. */
5261 #undef elf_backend_want_plt_sym
5262 #define elf_backend_want_plt_sym 1
5263
5264 #undef elf_backend_strtab_flags
5265 #define elf_backend_strtab_flags SHF_STRINGS
5266
5267 static bfd_boolean
5268 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5269 bfd *obfd ATTRIBUTE_UNUSED,
5270 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5271 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5272 {
5273 /* PR 19938: FIXME: Need to add code for setting the sh_info
5274 and sh_link fields of Solaris specific section types. */
5275 return FALSE;
5276 }
5277
5278 #undef elf_backend_copy_special_section_fields
5279 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5280
5281 #include "elf64-target.h"
5282
5283 /* Native Client support. */
5284
5285 static bfd_boolean
5286 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5287 {
5288 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5289 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5290 return TRUE;
5291 }
5292
5293 #undef TARGET_LITTLE_SYM
5294 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5295 #undef TARGET_LITTLE_NAME
5296 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5297 #undef elf64_bed
5298 #define elf64_bed elf64_x86_64_nacl_bed
5299
5300 #undef ELF_MAXPAGESIZE
5301 #undef ELF_MINPAGESIZE
5302 #undef ELF_COMMONPAGESIZE
5303 #define ELF_MAXPAGESIZE 0x10000
5304 #define ELF_MINPAGESIZE 0x10000
5305 #define ELF_COMMONPAGESIZE 0x10000
5306
5307 /* Restore defaults. */
5308 #undef ELF_OSABI
5309 #undef elf_backend_static_tls_alignment
5310 #undef elf_backend_want_plt_sym
5311 #define elf_backend_want_plt_sym 0
5312 #undef elf_backend_strtab_flags
5313 #undef elf_backend_copy_special_section_fields
5314
5315 /* NaCl uses substantially different PLT entries for the same effects. */
5316
5317 #undef elf_backend_plt_alignment
5318 #define elf_backend_plt_alignment 5
5319 #define NACL_PLT_ENTRY_SIZE 64
5320 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5321
5322 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5323 {
5324 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5325 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5326 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5327 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5328 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5329
5330 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5331 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5332
5333 /* 32 bytes of nop to pad out to the standard size. */
5334 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5335 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5336 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5337 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5338 0x66, /* excess data16 prefix */
5339 0x90 /* nop */
5340 };
5341
5342 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5343 {
5344 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5345 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5346 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5347 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5348
5349 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5350 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5351 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5352
5353 /* Lazy GOT entries point here (32-byte aligned). */
5354 0x68, /* pushq immediate */
5355 0, 0, 0, 0, /* replaced with index into relocation table. */
5356 0xe9, /* jmp relative */
5357 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5358
5359 /* 22 bytes of nop to pad out to the standard size. */
5360 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5361 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5362 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5363 };
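/* Editorial sketch, not part of the original file: the NaCl PLT entries
   above sandbox the indirect branch by masking the loaded target down
   to a 32-byte bundle boundary and rebasing it against the sandbox base
   kept in %r15.  Because "and $-32, %r11d" operates on the 32-bit
   register, it also zero-extends the value.  A hypothetical C rendering
   of the computed branch target:  */

static bfd_vma ATTRIBUTE_UNUSED
elf_x86_64_example_nacl_branch_target (bfd_vma got_value, bfd_vma r15_base)
{
  /* Truncate to 32 bits and clear the low five bits (NACLMASK is the
     low byte of the 32-bit mask 0xffffffe0).  */
  bfd_vma masked = (got_value & 0xffffffff) & ~(bfd_vma) 31;

  /* Rebase into the sandbox.  */
  return r15_base + masked;
}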
5364
5365 /* .eh_frame covering the .plt section. */
5366
5367 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5368 {
5369 #if (PLT_CIE_LENGTH != 20 \
5370 || PLT_FDE_LENGTH != 36 \
5371 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5372 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5373 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5374 #endif
5375 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5376 0, 0, 0, 0, /* CIE ID */
5377 1, /* CIE version */
5378 'z', 'R', 0, /* Augmentation string */
5379 1, /* Code alignment factor */
5380 0x78, /* Data alignment factor */
5381 16, /* Return address column */
5382 1, /* Augmentation size */
5383 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5384 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5385 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5386 DW_CFA_nop, DW_CFA_nop,
5387
5388 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5389 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5390 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5391 0, 0, 0, 0, /* .plt size goes here */
5392 0, /* Augmentation size */
5393 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5394 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5395 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5396 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5397 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5398 13, /* Block length */
5399 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5400 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5401 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5402 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5403 DW_CFA_nop, DW_CFA_nop
5404 };
5405
5406 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5407 {
5408 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5409 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5410 elf_x86_64_nacl_plt_entry, /* plt_entry */
5411 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5412 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5413 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5414 2, /* plt_tlsdesc_got1_offset */
5415 9, /* plt_tlsdesc_got2_offset */
5416 6, /* plt_tlsdesc_got1_insn_end */
5417 13, /* plt_tlsdesc_got2_insn_end */
5418 2, /* plt0_got1_offset */
5419 9, /* plt0_got2_offset */
5420 13, /* plt0_got2_insn_end */
5421 3, /* plt_got_offset */
5422 33, /* plt_reloc_offset */
5423 38, /* plt_plt_offset */
5424 7, /* plt_got_insn_size */
5425 42, /* plt_plt_insn_end */
5426 32, /* plt_lazy_offset */
5427 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5428 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5429 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5430 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5431 };
5432
5433 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5434 {
5435 is_nacl /* os */
5436 };
5437
5438 #undef elf_backend_arch_data
5439 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5440
5441 #undef elf_backend_object_p
5442 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5443 #undef elf_backend_modify_segment_map
5444 #define elf_backend_modify_segment_map nacl_modify_segment_map
5445 #undef elf_backend_modify_program_headers
5446 #define elf_backend_modify_program_headers nacl_modify_program_headers
5447 #undef elf_backend_final_write_processing
5448 #define elf_backend_final_write_processing nacl_final_write_processing
5449
5450 #include "elf64-target.h"
5451
5452 /* Native Client x32 support. */
5453
5454 static bfd_boolean
5455 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5456 {
5457 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5458 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5459 return TRUE;
5460 }

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
#undef elf32_bed
#define elf32_bed elf32_x86_64_nacl_bed

#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_nacl_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#undef elf32_bed
#define elf32_bed elf32_x86_64_bed

#include "elf32-target.h"

/* Restore defaults. */
#undef elf_backend_object_p
#define elf_backend_object_p elf64_x86_64_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#undef elf_backend_size_info
#undef elf_backend_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing

/* Intel L1OM support. */

static bfd_boolean
elf64_l1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an L1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_l1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_L1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_l1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_l1om_elf_object_p

/* Restore defaults. */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
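/* With -z separate-code enabled by default, a 4K maximum page size
   limits the alignment padding inserted between the separated code
   and data segments; otherwise keep the traditional 2M maximum.  */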
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE 0x1000
#else
# define ELF_MAXPAGESIZE 0x200000
#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_arch_bed

#include "elf64-target.h"

/* FreeBSD L1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_l1om_fbsd_bed

#include "elf64-target.h"

/* Intel K1OM support. */

static bfd_boolean
elf64_k1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a K1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_k1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_K1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_k1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_k1om_elf_object_p

#undef elf_backend_static_tls_alignment

#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0

#include "elf64-target.h"

/* FreeBSD K1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_k1om_fbsd_bed

#include "elf64-target.h"

/* 32bit x86-64 support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64"
#undef elf32_bed

#undef ELF_ARCH
#define ELF_ARCH bfd_arch_i386

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_X86_64

#undef ELF_OSABI

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

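/* x32 objects use ELFCLASS32 containers, so the generic 32-bit
   routines below handle reading an image from remote memory and the
   size-related parts of the backend.  */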
#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#include "elf32-target.h"