x86-64: Add TLSDESC fields to elf_x86_lazy_plt_layout
bfd/elf64-x86-64.c (deliverable/binutils-gdb.git)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
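/* For reference (illustrative note, assuming the relocation numbers in
   include/elf/x86-64.h: R_X86_64_REX_GOTPCRELX = 42,
   R_X86_64_GNU_VTINHERIT = 250, R_X86_64_GNU_VTENTRY = 251), the index
   math above works out as:

     R_X86_64_standard  = 42 + 1   = 43
     R_X86_64_vt_offset = 250 - 43 = 207

   so R_X86_64_GNU_VTINHERIT maps to index 250 - 207 = 43 and
   R_X86_64_GNU_VTENTRY to index 44, i.e. the two howto entries that
   follow the standard relocations, while the x32 variant of
   R_X86_64_32 occupies the final slot of x86_64_elf_howto_table.  */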
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
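/* For reference (descriptive note, not part of the original source):
   the magic numbers above mirror the Linux struct elf_prstatus layout.
   On x86-64, pr_reg starts at offset 112 and holds 216 bytes, i.e. the
   27 eight-byte registers of struct user_regs_struct; x32 core dumps
   carry the same 64-bit register block, but the smaller preceding
   fields put pr_reg at offset 72.  */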
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
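/* Illustration (a sketch of the usual lazy-binding flow, not code from
   this file): entry N's jmpq indirects through a GOT slot that the
   linker initialises to point back at the following pushq, so the first
   call falls through, pushes the relocation index and jumps to PLT0,
   which pushes the link-map pointer from GOT+8 and enters the dynamic
   linker via GOT+16.  The resolver then patches the GOT slot with the
   real target address, and every later call jumps there directly.  */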
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is the
595 same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for IBT-enabled branches in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for IBT-enabled branches in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
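/* For reference: in elf_x86_64_tlsdesc_plt_entry above, the 4-byte GOT
   displacements sit at entry offsets 6 (inside the pushq, whose encoding
   ends at offset 10) and 12 (inside the jmpq, which ends at offset 16).
   Those are the values recorded below in the plt_tlsdesc_got1_offset,
   plt_tlsdesc_got1_insn_end, plt_tlsdesc_got2_offset and
   plt_tlsdesc_got2_insn_end fields of the lazy PLT layouts.  */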
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
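/* For reference: the DW_CFA_def_cfa_expression above evaluates to
   CFA = %rsp + 8 + (((%rip & 15) >= 11) << 3).  Within a 16-byte lazy
   PLT entry the pushq finishes at offset 11, so once %rip mod 16
   reaches 11 the pushed relocation index adds 8 bytes to the frame and
   the CFA moves up accordingly.  The BND and IBT variants below use
   the same expression with a different comparison literal because
   their pushq ends at a different offset inside the entry.  */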
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transit to a different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transit to a different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq $r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq $rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transit to a different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq $r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq $rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
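/* Descriptive note: the ModRM test below accepts mod == 0 and
   r/m == 0b101 with any reg field, i.e. a RIP-relative memory
   operand, which is what both instruction forms above use.  */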
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
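/* Rough summary of the transitions allowed above (descriptive note):
   when linking an executable, GD, GDesc and LD sequences for local
   symbols relax to LE (R_X86_64_TPOFF32) and GD/GDesc sequences for
   other symbols relax to IE (R_X86_64_GOTTPOFF); during relocation, IE
   additionally relaxes to LE when TLS_TRANSITION_IE_TO_LE_P shows that
   the symbol resolves locally.  */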
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = _("; recompile with -fPIC");
1426 break;
1427 }
1428
1429 if (!h->def_regular && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = _("; recompile with -fPIC");
1436 }
1437
1438 if (bfd_link_dll (info))
1439 object = _("a shared object");
1440 else if (bfd_link_pie (info))
1441 object = _("a PIE object");
1442 else
1443 object = _("a PDE object");
1444
1445 /* xgettext:c-format */
1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1447 "not be used when making %s%s"),
1448 input_bfd, howto->name, und, v, name,
1449 object, pic);
1450 bfd_set_error (bfd_error_bad_value);
1451 sec->check_relocs_failed = 1;
1452 return FALSE;
1453 }
1454
1455 /* With the local symbol, foo, we convert
1456 mov foo@GOTPCREL(%rip), %reg
1457 to
1458 lea foo(%rip), %reg
1459 and convert
1460 call/jmp *foo@GOTPCREL(%rip)
1461 to
1462 nop call foo/jmp foo nop
1463 When PIC is false, convert
1464 test %reg, foo@GOTPCREL(%rip)
1465 to
1466 test $foo, %reg
1467 and convert
1468 binop foo@GOTPCREL(%rip), %reg
1469 to
1470 binop $foo, %reg
1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1472 instructions. */
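/* A byte-level sketch of the conversions described above (illustrative
   only, shown with %rax as the destination register):

     48 8b 05 <disp32>   mov  foo@GOTPCREL(%rip), %rax
  -> 48 8d 05 <disp32>   lea  foo(%rip), %rax        (only 0x8b -> 0x8d)

     ff 25 <disp32>      jmp  *foo@GOTPCREL(%rip)
  -> e9 <disp32> 90      jmp  foo; nop

     ff 15 <disp32>      call *foo@GOTPCREL(%rip)
  -> 67 e8 <disp32>      addr32 call foo             (or a nop byte before
                                                      or after the call, per
                                                      call_nop_byte and
                                                      call_nop_as_suffix)

   Each rewritten form has the same length as the original, so only the
   opcode/ModRM bytes, the relocation type and, for branches, the
   displacement position change.  */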
1473
1474 static bfd_boolean
1475 elf_x86_64_convert_load_reloc (bfd *abfd,
1476 bfd_byte *contents,
1477 unsigned int *r_type_p,
1478 Elf_Internal_Rela *irel,
1479 struct elf_link_hash_entry *h,
1480 bfd_boolean *converted,
1481 struct bfd_link_info *link_info)
1482 {
1483 struct elf_x86_link_hash_table *htab;
1484 bfd_boolean is_pic;
1485 bfd_boolean no_overflow;
1486 bfd_boolean relocx;
1487 bfd_boolean to_reloc_pc32;
1488 asection *tsec;
1489 bfd_signed_vma raddend;
1490 unsigned int opcode;
1491 unsigned int modrm;
1492 unsigned int r_type = *r_type_p;
1493 unsigned int r_symndx;
1494 bfd_vma roff = irel->r_offset;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return TRUE;
1498
1499 raddend = irel->r_addend;
1500 /* Addend for 32-bit PC-relative relocation must be -4. */
1501 if (raddend != -4)
1502 return TRUE;
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517 /* A mov may be converted to lea even for plain R_X86_64_GOTPCREL, since mov-to-lea conversion has been supported for a while. */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return TRUE;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 /* Get the symbol referred to by the reloc. */
1539 if (h == NULL)
1540 {
1541 Elf_Internal_Sym *isym
1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1543
1544 /* Skip relocation against undefined symbols. */
1545 if (isym->st_shndx == SHN_UNDEF)
1546 return TRUE;
1547
1548 if (isym->st_shndx == SHN_ABS)
1549 tsec = bfd_abs_section_ptr;
1550 else if (isym->st_shndx == SHN_COMMON)
1551 tsec = bfd_com_section_ptr;
1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1553 tsec = &_bfd_elf_large_com_section;
1554 else
1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1556 }
1557 else
1558 {
1559 /* An undefined weak symbol is only bound locally in an executable
1560 and its reference is resolved as 0 without relocation
1561 overflow. We can only perform this optimization for
1562 GOTPCRELX relocations since we need to modify the REX byte.
1563 It is OK to convert mov with R_X86_64_GOTPCREL to
1564 R_X86_64_PC32. */
1565 bfd_boolean local_ref;
1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1567
1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1570 if ((relocx || opcode == 0x8b)
1571 && (h->root.type == bfd_link_hash_undefweak
1572 && !eh->linker_def
1573 && local_ref))
1574 {
1575 if (opcode == 0xff)
1576 {
1577 /* Skip for branch instructions since R_X86_64_PC32
1578 may overflow. */
1579 if (no_overflow)
1580 return TRUE;
1581 }
1582 else if (relocx)
1583 {
1584 /* For non-branch instructions, we can convert to
1585 R_X86_64_32/R_X86_64_32S since we know if there
1586 is a REX byte. */
1587 to_reloc_pc32 = FALSE;
1588 }
1589
1590 /* Since we don't know the current PC when PIC is true,
1591 we can't convert to R_X86_64_PC32. */
1592 if (to_reloc_pc32 && is_pic)
1593 return TRUE;
1594
1595 goto convert;
1596 }
1597 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1598 ld.so may use its link-time address. */
1599 else if (h->start_stop
1600 || eh->linker_def
1601 || ((h->def_regular
1602 || h->root.type == bfd_link_hash_defined
1603 || h->root.type == bfd_link_hash_defweak)
1604 && h != htab->elf.hdynamic
1605 && local_ref))
1606 {
1607 /* bfd_link_hash_new or bfd_link_hash_undefined is
1608 set by an assignment in a linker script in
1609 bfd_elf_record_link_assignment. start_stop is set
1610 on __start_SECNAME/__stop_SECNAME which mark section
1611 SECNAME. */
1612 if (h->start_stop
1613 || eh->linker_def
1614 || (h->def_regular
1615 && (h->root.type == bfd_link_hash_new
1616 || h->root.type == bfd_link_hash_undefined
1617 || ((h->root.type == bfd_link_hash_defined
1618 || h->root.type == bfd_link_hash_defweak)
1619 && h->root.u.def.section == bfd_und_section_ptr))))
1620 {
1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1622 if (no_overflow)
1623 return TRUE;
1624 goto convert;
1625 }
1626 tsec = h->root.u.def.section;
1627 }
1628 else
1629 return TRUE;
1630 }
1631
1632 /* Don't convert GOTPCREL relocation against large section. */
1633 if (elf_section_data (tsec) != NULL
1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1635 return TRUE;
1636
1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1638 if (no_overflow)
1639 return TRUE;
1640
1641 convert:
1642 if (opcode == 0xff)
1643 {
1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1645 unsigned int nop;
1646 unsigned int disp;
1647 bfd_vma nop_offset;
1648
1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1650 R_X86_64_PC32. */
1651 modrm = bfd_get_8 (abfd, contents + roff - 1);
1652 if (modrm == 0x25)
1653 {
1654 /* Convert to "jmp foo nop". */
1655 modrm = 0xe9;
1656 nop = NOP_OPCODE;
1657 nop_offset = irel->r_offset + 3;
1658 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1659 irel->r_offset -= 1;
1660 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1661 }
1662 else
1663 {
1664 struct elf_x86_link_hash_entry *eh
1665 = (struct elf_x86_link_hash_entry *) h;
1666
1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1668 is a nop prefix. */
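/* For illustration: "ff 15 <disp32>" (call *foo@GOTPCREL(%rip))
becomes "<prefix byte> e8 <rel32>" (addr32 or the configured nop
byte first), or "e8 <rel32> <nop>" when call_nop_as_suffix is set. */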
1669 modrm = 0xe8;
1670 /* To support TLS optimization, always use addr32 prefix for
1671 "call *__tls_get_addr@GOTPCREL(%rip)". */
1672 if (eh && eh->tls_get_addr)
1673 {
1674 nop = 0x67;
1675 nop_offset = irel->r_offset - 2;
1676 }
1677 else
1678 {
1679 nop = link_info->call_nop_byte;
1680 if (link_info->call_nop_as_suffix)
1681 {
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 nop_offset = irel->r_offset - 2;
1689 }
1690 }
1691 bfd_put_8 (abfd, nop, contents + nop_offset);
1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1693 r_type = R_X86_64_PC32;
1694 }
1695 else
1696 {
1697 unsigned int rex;
1698 unsigned int rex_mask = REX_R;
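/* rex_mask names the REX bits to clear below: by default only REX.R,
which is moved down to REX.B because the register switches from the
ModRM reg field to the r/m field; REX.W is added to the mask when we
convert to R_X86_64_32 so the immediate isn't sign-extended. */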
1699
1700 if (r_type == R_X86_64_REX_GOTPCRELX)
1701 rex = bfd_get_8 (abfd, contents + roff - 3);
1702 else
1703 rex = 0;
1704
1705 if (opcode == 0x8b)
1706 {
1707 if (to_reloc_pc32)
1708 {
1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1710 "lea foo(%rip), %reg". */
1711 opcode = 0x8d;
1712 r_type = R_X86_64_PC32;
1713 }
1714 else
1715 {
1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1717 "mov $foo, %reg". */
1718 opcode = 0xc7;
1719 modrm = bfd_get_8 (abfd, contents + roff - 1);
1720 modrm = 0xc0 | (modrm & 0x38) >> 3;
1721 if ((rex & REX_W) != 0
1722 && ABI_64_P (link_info->output_bfd))
1723 {
1724 /* Keep the REX_W bit in REX byte for LP64. */
1725 r_type = R_X86_64_32S;
1726 goto rewrite_modrm_rex;
1727 }
1728 else
1729 {
1730 /* If the REX_W bit in REX byte isn't needed,
1731 use R_X86_64_32 and clear the W bit to avoid
1732 sign-extend imm32 to imm64. */
1733 r_type = R_X86_64_32;
1734 /* Clear the W bit in REX byte. */
1735 rex_mask |= REX_W;
1736 goto rewrite_modrm_rex;
1737 }
1738 }
1739 }
1740 else
1741 {
1742 /* R_X86_64_PC32 isn't supported. */
1743 if (to_reloc_pc32)
1744 return TRUE;
1745
1746 modrm = bfd_get_8 (abfd, contents + roff - 1);
1747 if (opcode == 0x85)
1748 {
1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1750 "test $foo, %reg". */
1751 modrm = 0xc0 | (modrm & 0x38) >> 3;
1752 opcode = 0xf7;
1753 }
1754 else
1755 {
1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1757 "binop $foo, %reg". */
1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1759 opcode = 0x81;
1760 }
1761
1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1763 overflow when sign-extending imm32 to imm64. */
1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1765
1766 rewrite_modrm_rex:
1767 bfd_put_8 (abfd, modrm, contents + roff - 1);
1768
1769 if (rex)
1770 {
1771 /* Move the R bit to the B bit in REX byte. */
1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1773 bfd_put_8 (abfd, rex, contents + roff - 3);
1774 }
1775
1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1777 irel->r_addend = 0;
1778 }
1779
1780 bfd_put_8 (abfd, opcode, contents + roff - 2);
1781 }
1782
1783 *r_type_p = r_type;
1784 irel->r_info = htab->r_info (r_symndx,
1785 r_type | R_X86_64_converted_reloc_bit);
1786
1787 *converted = TRUE;
1788
1789 return TRUE;
1790 }
1791
1792 /* Look through the relocs for a section during the first phase, and
1793 calculate needed space in the global offset table, procedure
1794 linkage table, and dynamic reloc sections. */
1795
1796 static bfd_boolean
1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1798 asection *sec,
1799 const Elf_Internal_Rela *relocs)
1800 {
1801 struct elf_x86_link_hash_table *htab;
1802 Elf_Internal_Shdr *symtab_hdr;
1803 struct elf_link_hash_entry **sym_hashes;
1804 const Elf_Internal_Rela *rel;
1805 const Elf_Internal_Rela *rel_end;
1806 asection *sreloc;
1807 bfd_byte *contents;
1808 bfd_boolean converted;
1809
1810 if (bfd_link_relocatable (info))
1811 return TRUE;
1812
1813 /* Don't do anything special with non-loaded, non-alloced sections.
1814 In particular, any relocs in such sections should not affect GOT
1815 and PLT reference counting (ie. we don't allow them to create GOT
1816 or PLT entries), there's no possibility or desire to optimize TLS
1817 relocs, and there's not much point in propagating relocs to shared
1818 libs that the dynamic linker won't relocate. */
1819 if ((sec->flags & SEC_ALLOC) == 0)
1820 return TRUE;
1821
1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1823 if (htab == NULL)
1824 {
1825 sec->check_relocs_failed = 1;
1826 return FALSE;
1827 }
1828
1829 BFD_ASSERT (is_x86_elf (abfd, htab));
1830
1831 /* Get the section contents. */
1832 if (elf_section_data (sec)->this_hdr.contents != NULL)
1833 contents = elf_section_data (sec)->this_hdr.contents;
1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1835 {
1836 sec->check_relocs_failed = 1;
1837 return FALSE;
1838 }
1839
1840 symtab_hdr = &elf_symtab_hdr (abfd);
1841 sym_hashes = elf_sym_hashes (abfd);
1842
1843 converted = FALSE;
1844
1845 sreloc = NULL;
1846
1847 rel_end = relocs + sec->reloc_count;
1848 for (rel = relocs; rel < rel_end; rel++)
1849 {
1850 unsigned int r_type;
1851 unsigned int r_symndx;
1852 struct elf_link_hash_entry *h;
1853 struct elf_x86_link_hash_entry *eh;
1854 Elf_Internal_Sym *isym;
1855 const char *name;
1856 bfd_boolean size_reloc;
1857 bfd_boolean converted_reloc;
1858
1859 r_symndx = htab->r_sym (rel->r_info);
1860 r_type = ELF32_R_TYPE (rel->r_info);
1861
1862 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1863 {
1864 /* xgettext:c-format */
1865 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1866 abfd, r_symndx);
1867 goto error_return;
1868 }
1869
1870 if (r_symndx < symtab_hdr->sh_info)
1871 {
1872 /* A local symbol. */
1873 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1874 abfd, r_symndx);
1875 if (isym == NULL)
1876 goto error_return;
1877
1878 /* Check relocation against local STT_GNU_IFUNC symbol. */
1879 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1880 {
1881 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1882 TRUE);
1883 if (h == NULL)
1884 goto error_return;
1885
1886 /* Fake a STT_GNU_IFUNC symbol. */
1887 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1888 isym, NULL);
1889 h->type = STT_GNU_IFUNC;
1890 h->def_regular = 1;
1891 h->ref_regular = 1;
1892 h->forced_local = 1;
1893 h->root.type = bfd_link_hash_defined;
1894 }
1895 else
1896 h = NULL;
1897 }
1898 else
1899 {
1900 isym = NULL;
1901 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1902 while (h->root.type == bfd_link_hash_indirect
1903 || h->root.type == bfd_link_hash_warning)
1904 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1905 }
1906
1907 /* Check for invalid x32 relocations. */
1908 if (!ABI_64_P (abfd))
1909 switch (r_type)
1910 {
1911 default:
1912 break;
1913
1914 case R_X86_64_DTPOFF64:
1915 case R_X86_64_TPOFF64:
1916 case R_X86_64_PC64:
1917 case R_X86_64_GOTOFF64:
1918 case R_X86_64_GOT64:
1919 case R_X86_64_GOTPCREL64:
1920 case R_X86_64_GOTPC64:
1921 case R_X86_64_GOTPLT64:
1922 case R_X86_64_PLTOFF64:
1923 {
1924 if (h)
1925 name = h->root.root.string;
1926 else
1927 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1928 NULL);
1929 _bfd_error_handler
1930 /* xgettext:c-format */
1931 (_("%pB: relocation %s against symbol `%s' isn't "
1932 "supported in x32 mode"), abfd,
1933 x86_64_elf_howto_table[r_type].name, name);
1934 bfd_set_error (bfd_error_bad_value);
1935 goto error_return;
1936 }
1937 break;
1938 }
1939
1940 if (h != NULL)
1941 {
1942 /* It is referenced by a non-shared object. */
1943 h->ref_regular = 1;
1944
1945 if (h->type == STT_GNU_IFUNC)
1946 elf_tdata (info->output_bfd)->has_gnu_symbols
1947 |= elf_gnu_symbol_ifunc;
1948 }
1949
1950 converted_reloc = FALSE;
1951 if ((r_type == R_X86_64_GOTPCREL
1952 || r_type == R_X86_64_GOTPCRELX
1953 || r_type == R_X86_64_REX_GOTPCRELX)
1954 && (h == NULL || h->type != STT_GNU_IFUNC))
1955 {
1956 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1957 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1958 irel, h, &converted_reloc,
1959 info))
1960 goto error_return;
1961
1962 if (converted_reloc)
1963 converted = TRUE;
1964 }
1965
1966 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1967 symtab_hdr, sym_hashes,
1968 &r_type, GOT_UNKNOWN,
1969 rel, rel_end, h, r_symndx, FALSE))
1970 goto error_return;
1971
1972 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1973 if (h == htab->elf.hgot)
1974 htab->got_referenced = TRUE;
1975
1976 eh = (struct elf_x86_link_hash_entry *) h;
1977 switch (r_type)
1978 {
1979 case R_X86_64_TLSLD:
1980 htab->tls_ld_or_ldm_got.refcount = 1;
1981 goto create_got;
1982
1983 case R_X86_64_TPOFF32:
1984 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1985 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1986 &x86_64_elf_howto_table[r_type]);
1987 if (eh != NULL)
1988 eh->zero_undefweak &= 0x2;
1989 break;
1990
1991 case R_X86_64_GOTTPOFF:
1992 if (!bfd_link_executable (info))
1993 info->flags |= DF_STATIC_TLS;
1994 /* Fall through */
1995
1996 case R_X86_64_GOT32:
1997 case R_X86_64_GOTPCREL:
1998 case R_X86_64_GOTPCRELX:
1999 case R_X86_64_REX_GOTPCRELX:
2000 case R_X86_64_TLSGD:
2001 case R_X86_64_GOT64:
2002 case R_X86_64_GOTPCREL64:
2003 case R_X86_64_GOTPLT64:
2004 case R_X86_64_GOTPC32_TLSDESC:
2005 case R_X86_64_TLSDESC_CALL:
2006 /* This symbol requires a global offset table entry. */
2007 {
2008 int tls_type, old_tls_type;
2009
2010 switch (r_type)
2011 {
2012 default: tls_type = GOT_NORMAL; break;
2013 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2014 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2015 case R_X86_64_GOTPC32_TLSDESC:
2016 case R_X86_64_TLSDESC_CALL:
2017 tls_type = GOT_TLS_GDESC; break;
2018 }
2019
2020 if (h != NULL)
2021 {
2022 h->got.refcount = 1;
2023 old_tls_type = eh->tls_type;
2024 }
2025 else
2026 {
2027 bfd_signed_vma *local_got_refcounts;
2028
2029 /* This is a global offset table entry for a local symbol. */
2030 local_got_refcounts = elf_local_got_refcounts (abfd);
2031 if (local_got_refcounts == NULL)
2032 {
2033 bfd_size_type size;
2034
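/* A single allocation backs three parallel per-local-symbol arrays:
sh_info GOT refcounts (bfd_signed_vma), then sh_info TLSDESC GOT
offsets (bfd_vma), then sh_info TLS type bytes (char). */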
2035 size = symtab_hdr->sh_info;
2036 size *= sizeof (bfd_signed_vma)
2037 + sizeof (bfd_vma) + sizeof (char);
2038 local_got_refcounts = ((bfd_signed_vma *)
2039 bfd_zalloc (abfd, size));
2040 if (local_got_refcounts == NULL)
2041 goto error_return;
2042 elf_local_got_refcounts (abfd) = local_got_refcounts;
2043 elf_x86_local_tlsdesc_gotent (abfd)
2044 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2045 elf_x86_local_got_tls_type (abfd)
2046 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2047 }
2048 local_got_refcounts[r_symndx] = 1;
2049 old_tls_type
2050 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2051 }
2052
2053 /* If a TLS symbol is accessed using IE at least once,
2054 there is no point in using the dynamic model for it. */
2055 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2056 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2057 || tls_type != GOT_TLS_IE))
2058 {
2059 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2060 tls_type = old_tls_type;
2061 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2062 && GOT_TLS_GD_ANY_P (tls_type))
2063 tls_type |= old_tls_type;
2064 else
2065 {
2066 if (h)
2067 name = h->root.root.string;
2068 else
2069 name = bfd_elf_sym_name (abfd, symtab_hdr,
2070 isym, NULL);
2071 _bfd_error_handler
2072 /* xgettext:c-format */
2073 (_("%pB: '%s' accessed both as normal and"
2074 " thread local symbol"),
2075 abfd, name);
2076 bfd_set_error (bfd_error_bad_value);
2077 goto error_return;
2078 }
2079 }
2080
2081 if (old_tls_type != tls_type)
2082 {
2083 if (eh != NULL)
2084 eh->tls_type = tls_type;
2085 else
2086 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2087 }
2088 }
2089 /* Fall through */
2090
2091 case R_X86_64_GOTOFF64:
2092 case R_X86_64_GOTPC32:
2093 case R_X86_64_GOTPC64:
2094 create_got:
2095 if (eh != NULL)
2096 eh->zero_undefweak &= 0x2;
2097 break;
2098
2099 case R_X86_64_PLT32:
2100 case R_X86_64_PLT32_BND:
2101 /* This symbol requires a procedure linkage table entry. We
2102 actually build the entry in adjust_dynamic_symbol,
2103 because this might be a case of linking PIC code which is
2104 never referenced by a dynamic object, in which case we
2105 don't need to generate a procedure linkage table entry
2106 after all. */
2107
2108 /* If this is a local symbol, we resolve it directly without
2109 creating a procedure linkage table entry. */
2110 if (h == NULL)
2111 continue;
2112
2113 eh->zero_undefweak &= 0x2;
2114 h->needs_plt = 1;
2115 h->plt.refcount = 1;
2116 break;
2117
2118 case R_X86_64_PLTOFF64:
2119 /* This tries to form the 'address' of a function relative
2120 to GOT. For global symbols we need a PLT entry. */
2121 if (h != NULL)
2122 {
2123 h->needs_plt = 1;
2124 h->plt.refcount = 1;
2125 }
2126 goto create_got;
2127
2128 case R_X86_64_SIZE32:
2129 case R_X86_64_SIZE64:
2130 size_reloc = TRUE;
2131 goto do_size;
2132
2133 case R_X86_64_32:
2134 if (!ABI_64_P (abfd))
2135 goto pointer;
2136 /* Fall through. */
2137 case R_X86_64_8:
2138 case R_X86_64_16:
2139 case R_X86_64_32S:
2140 /* Check relocation overflow as these relocs may lead to
2141 run-time relocation overflow. Don't error out for
2142 sections we don't care about, such as debug sections or
2143 when relocation overflow check is disabled. */
2144 if (!info->no_reloc_overflow_check
2145 && !converted_reloc
2146 && (bfd_link_pic (info)
2147 || (bfd_link_executable (info)
2148 && h != NULL
2149 && !h->def_regular
2150 && h->def_dynamic
2151 && (sec->flags & SEC_READONLY) == 0)))
2152 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2153 &x86_64_elf_howto_table[r_type]);
2154 /* Fall through. */
2155
2156 case R_X86_64_PC8:
2157 case R_X86_64_PC16:
2158 case R_X86_64_PC32:
2159 case R_X86_64_PC32_BND:
2160 case R_X86_64_PC64:
2161 case R_X86_64_64:
2162 pointer:
2163 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2164 eh->zero_undefweak |= 0x2;
2165 /* We are called after all symbols have been resolved. Only
2166 relocations against STT_GNU_IFUNC symbols must go through
2167 the PLT. */
2168 if (h != NULL
2169 && (bfd_link_executable (info)
2170 || h->type == STT_GNU_IFUNC))
2171 {
2172 bfd_boolean func_pointer_ref = FALSE;
2173
2174 if (r_type == R_X86_64_PC32)
2175 {
2176 /* Since something like ".long foo - ." may be used
2177 as a pointer, make sure that the PLT is used if foo is
2178 a function defined in a shared library. */
2179 if ((sec->flags & SEC_CODE) == 0)
2180 {
2181 h->pointer_equality_needed = 1;
2182 if (bfd_link_pie (info)
2183 && h->type == STT_FUNC
2184 && !h->def_regular
2185 && h->def_dynamic)
2186 {
2187 h->needs_plt = 1;
2188 h->plt.refcount = 1;
2189 }
2190 }
2191 }
2192 else if (r_type != R_X86_64_PC32_BND
2193 && r_type != R_X86_64_PC64)
2194 {
2195 h->pointer_equality_needed = 1;
2196 /* At run-time, R_X86_64_64 can be resolved for both
2197 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2198 can only be resolved for x32. */
2199 if ((sec->flags & SEC_READONLY) == 0
2200 && (r_type == R_X86_64_64
2201 || (!ABI_64_P (abfd)
2202 && (r_type == R_X86_64_32
2203 || r_type == R_X86_64_32S))))
2204 func_pointer_ref = TRUE;
2205 }
2206
2207 if (!func_pointer_ref)
2208 {
2209 /* If this reloc is in a read-only section, we might
2210 need a copy reloc. We can't check reliably at this
2211 stage whether the section is read-only, as input
2212 sections have not yet been mapped to output sections.
2213 Tentatively set the flag for now, and correct in
2214 adjust_dynamic_symbol. */
2215 h->non_got_ref = 1;
2216
2217 /* We may need a .plt entry if the symbol is a function
2218 defined in a shared lib or is a function referenced
2219 from the code or read-only section. */
2220 if (!h->def_regular
2221 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2222 h->plt.refcount = 1;
2223 }
2224 }
2225
2226 size_reloc = FALSE;
2227 do_size:
2228 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2229 htab->pointer_r_type))
2230 {
2231 struct elf_dyn_relocs *p;
2232 struct elf_dyn_relocs **head;
2233
2234 /* We must copy these reloc types into the output file.
2235 Create a reloc section in dynobj and make room for
2236 this reloc. */
2237 if (sreloc == NULL)
2238 {
2239 sreloc = _bfd_elf_make_dynamic_reloc_section
2240 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2241 abfd, /*rela?*/ TRUE);
2242
2243 if (sreloc == NULL)
2244 goto error_return;
2245 }
2246
2247 /* If this is a global symbol, we count the number of
2248 relocations we need for this symbol. */
2249 if (h != NULL)
2250 head = &eh->dyn_relocs;
2251 else
2252 {
2253 /* Track dynamic relocs needed for local syms too.
2254 We really need local syms available to do this
2255 easily. Oh well. */
2256 asection *s;
2257 void **vpp;
2258
2259 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2260 abfd, r_symndx);
2261 if (isym == NULL)
2262 goto error_return;
2263
2264 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2265 if (s == NULL)
2266 s = sec;
2267
2268 /* Beware of type punned pointers vs strict aliasing
2269 rules. */
2270 vpp = &(elf_section_data (s)->local_dynrel);
2271 head = (struct elf_dyn_relocs **)vpp;
2272 }
2273
2274 p = *head;
2275 if (p == NULL || p->sec != sec)
2276 {
2277 bfd_size_type amt = sizeof *p;
2278
2279 p = ((struct elf_dyn_relocs *)
2280 bfd_alloc (htab->elf.dynobj, amt));
2281 if (p == NULL)
2282 goto error_return;
2283 p->next = *head;
2284 *head = p;
2285 p->sec = sec;
2286 p->count = 0;
2287 p->pc_count = 0;
2288 }
2289
2290 p->count += 1;
2291 /* Count size relocation as PC-relative relocation. */
2292 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2293 p->pc_count += 1;
2294 }
2295 break;
2296
2297 /* This relocation describes the C++ object vtable hierarchy.
2298 Reconstruct it for later use during GC. */
2299 case R_X86_64_GNU_VTINHERIT:
2300 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2301 goto error_return;
2302 break;
2303
2304 /* This relocation describes which C++ vtable entries are actually
2305 used. Record for later use during GC. */
2306 case R_X86_64_GNU_VTENTRY:
2307 BFD_ASSERT (h != NULL);
2308 if (h != NULL
2309 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2310 goto error_return;
2311 break;
2312
2313 default:
2314 break;
2315 }
2316 }
2317
2318 if (elf_section_data (sec)->this_hdr.contents != contents)
2319 {
2320 if (!converted && !info->keep_memory)
2321 free (contents);
2322 else
2323 {
2324 /* Cache the section contents for elf_link_input_bfd if any
2325 load is converted or --no-keep-memory isn't used. */
2326 elf_section_data (sec)->this_hdr.contents = contents;
2327 }
2328 }
2329
2330 /* Cache relocations if any load is converted. */
2331 if (elf_section_data (sec)->relocs != relocs && converted)
2332 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2333
2334 return TRUE;
2335
2336 error_return:
2337 if (elf_section_data (sec)->this_hdr.contents != contents)
2338 free (contents);
2339 sec->check_relocs_failed = 1;
2340 return FALSE;
2341 }
2342
2343 /* Return the relocation value for @tpoff relocation
2344 if STT_TLS virtual address is ADDRESS. */
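/* x86-64 uses TLS variant II: the thread pointer sits just past the
static TLS block, so the @tpoff values computed here are negative
offsets from %fs:0. */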
2345
2346 static bfd_vma
2347 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2348 {
2349 struct elf_link_hash_table *htab = elf_hash_table (info);
2350 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2351 bfd_vma static_tls_size;
2352
2353 /* If tls_sec is NULL, we should have signalled an error already. */
2354 if (htab->tls_sec == NULL)
2355 return 0;
2356
2357 /* Consider special static TLS alignment requirements. */
2358 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2359 return address - static_tls_size - htab->tls_sec->vma;
2360 }
2361
2362 /* Relocate an x86_64 ELF section. */
2363
2364 static bfd_boolean
2365 elf_x86_64_relocate_section (bfd *output_bfd,
2366 struct bfd_link_info *info,
2367 bfd *input_bfd,
2368 asection *input_section,
2369 bfd_byte *contents,
2370 Elf_Internal_Rela *relocs,
2371 Elf_Internal_Sym *local_syms,
2372 asection **local_sections)
2373 {
2374 struct elf_x86_link_hash_table *htab;
2375 Elf_Internal_Shdr *symtab_hdr;
2376 struct elf_link_hash_entry **sym_hashes;
2377 bfd_vma *local_got_offsets;
2378 bfd_vma *local_tlsdesc_gotents;
2379 Elf_Internal_Rela *rel;
2380 Elf_Internal_Rela *wrel;
2381 Elf_Internal_Rela *relend;
2382 unsigned int plt_entry_size;
2383
2384 /* Skip if check_relocs failed. */
2385 if (input_section->check_relocs_failed)
2386 return FALSE;
2387
2388 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2389 if (htab == NULL)
2390 return FALSE;
2391
2392 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2393
2394 plt_entry_size = htab->plt.plt_entry_size;
2395 symtab_hdr = &elf_symtab_hdr (input_bfd);
2396 sym_hashes = elf_sym_hashes (input_bfd);
2397 local_got_offsets = elf_local_got_offsets (input_bfd);
2398 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2399
2400 _bfd_x86_elf_set_tls_module_base (info);
2401
2402 rel = wrel = relocs;
2403 relend = relocs + input_section->reloc_count;
2404 for (; rel < relend; wrel++, rel++)
2405 {
2406 unsigned int r_type, r_type_tls;
2407 reloc_howto_type *howto;
2408 unsigned long r_symndx;
2409 struct elf_link_hash_entry *h;
2410 struct elf_x86_link_hash_entry *eh;
2411 Elf_Internal_Sym *sym;
2412 asection *sec;
2413 bfd_vma off, offplt, plt_offset;
2414 bfd_vma relocation;
2415 bfd_boolean unresolved_reloc;
2416 bfd_reloc_status_type r;
2417 int tls_type;
2418 asection *base_got, *resolved_plt;
2419 bfd_vma st_size;
2420 bfd_boolean resolved_to_zero;
2421 bfd_boolean relative_reloc;
2422 bfd_boolean converted_reloc;
2423 bfd_boolean need_copy_reloc_in_pie;
2424
2425 r_type = ELF32_R_TYPE (rel->r_info);
2426 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2427 || r_type == (int) R_X86_64_GNU_VTENTRY)
2428 {
2429 if (wrel != rel)
2430 *wrel = *rel;
2431 continue;
2432 }
2433
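/* check_relocs tags GOTPCREL relocations it has already rewritten by
setting R_X86_64_converted_reloc_bit in r_info; remember the tag and
strip it so the rest of the loop sees the converted type. */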
2434 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2435 r_type &= ~R_X86_64_converted_reloc_bit;
2436
2437 if (r_type >= (int) R_X86_64_standard)
2438 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2439
2440 if (r_type != (int) R_X86_64_32
2441 || ABI_64_P (output_bfd))
2442 howto = x86_64_elf_howto_table + r_type;
2443 else
2444 howto = (x86_64_elf_howto_table
2445 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2446 r_symndx = htab->r_sym (rel->r_info);
2447 h = NULL;
2448 sym = NULL;
2449 sec = NULL;
2450 unresolved_reloc = FALSE;
2451 if (r_symndx < symtab_hdr->sh_info)
2452 {
2453 sym = local_syms + r_symndx;
2454 sec = local_sections[r_symndx];
2455
2456 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2457 &sec, rel);
2458 st_size = sym->st_size;
2459
2460 /* Relocate against local STT_GNU_IFUNC symbol. */
2461 if (!bfd_link_relocatable (info)
2462 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2463 {
2464 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2465 rel, FALSE);
2466 if (h == NULL)
2467 abort ();
2468
2469 /* Set STT_GNU_IFUNC symbol value. */
2470 h->root.u.def.value = sym->st_value;
2471 h->root.u.def.section = sec;
2472 }
2473 }
2474 else
2475 {
2476 bfd_boolean warned ATTRIBUTE_UNUSED;
2477 bfd_boolean ignored ATTRIBUTE_UNUSED;
2478
2479 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2480 r_symndx, symtab_hdr, sym_hashes,
2481 h, sec, relocation,
2482 unresolved_reloc, warned, ignored);
2483 st_size = h->size;
2484 }
2485
2486 if (sec != NULL && discarded_section (sec))
2487 {
2488 _bfd_clear_contents (howto, input_bfd, input_section,
2489 contents + rel->r_offset);
2490 wrel->r_offset = rel->r_offset;
2491 wrel->r_info = 0;
2492 wrel->r_addend = 0;
2493
2494 /* For ld -r, remove relocations in debug sections against
2495 sections defined in discarded sections. Not done for
2496 eh_frame, whose editing code expects the relocs to be present. */
2497 if (bfd_link_relocatable (info)
2498 && (input_section->flags & SEC_DEBUGGING))
2499 wrel--;
2500
2501 continue;
2502 }
2503
2504 if (bfd_link_relocatable (info))
2505 {
2506 if (wrel != rel)
2507 *wrel = *rel;
2508 continue;
2509 }
2510
2511 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2512 {
2513 if (r_type == R_X86_64_64)
2514 {
2515 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2516 zero-extend it to 64bit if addend is zero. */
2517 r_type = R_X86_64_32;
2518 memset (contents + rel->r_offset + 4, 0, 4);
2519 }
2520 else if (r_type == R_X86_64_SIZE64)
2521 {
2522 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2523 zero-extend it to 64bit if addend is zero. */
2524 r_type = R_X86_64_SIZE32;
2525 memset (contents + rel->r_offset + 4, 0, 4);
2526 }
2527 }
2528
2529 eh = (struct elf_x86_link_hash_entry *) h;
2530
2531 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2532 it here if it is defined in a non-shared object. */
2533 if (h != NULL
2534 && h->type == STT_GNU_IFUNC
2535 && h->def_regular)
2536 {
2537 bfd_vma plt_index;
2538 const char *name;
2539
2540 if ((input_section->flags & SEC_ALLOC) == 0)
2541 {
2542 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2543 STT_GNU_IFUNC symbol as STT_FUNC. */
2544 if (elf_section_type (input_section) == SHT_NOTE)
2545 goto skip_ifunc;
2546 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2547 sections because such sections are not SEC_ALLOC and
2548 thus ld.so will not process them. */
2549 if ((input_section->flags & SEC_DEBUGGING) != 0)
2550 continue;
2551 abort ();
2552 }
2553
2554 switch (r_type)
2555 {
2556 default:
2557 break;
2558
2559 case R_X86_64_GOTPCREL:
2560 case R_X86_64_GOTPCRELX:
2561 case R_X86_64_REX_GOTPCRELX:
2562 case R_X86_64_GOTPCREL64:
2563 base_got = htab->elf.sgot;
2564 off = h->got.offset;
2565
2566 if (base_got == NULL)
2567 abort ();
2568
2569 if (off == (bfd_vma) -1)
2570 {
2571 /* We can't use h->got.offset here to save state, or
2572 even just remember the offset, as finish_dynamic_symbol
2573 would use that as offset into .got. */
2574
2575 if (h->plt.offset == (bfd_vma) -1)
2576 abort ();
2577
2578 if (htab->elf.splt != NULL)
2579 {
2580 plt_index = (h->plt.offset / plt_entry_size
2581 - htab->plt.has_plt0);
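/* The first three .got.plt slots are reserved (the address of
_DYNAMIC and two slots filled in by the dynamic linker), so
PLT-indexed GOT entries start at index 3. */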
2582 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2583 base_got = htab->elf.sgotplt;
2584 }
2585 else
2586 {
2587 plt_index = h->plt.offset / plt_entry_size;
2588 off = plt_index * GOT_ENTRY_SIZE;
2589 base_got = htab->elf.igotplt;
2590 }
2591
2592 if (h->dynindx == -1
2593 || h->forced_local
2594 || info->symbolic)
2595 {
2596 /* This references the local definition. We must
2597 initialize this entry in the global offset table.
2598 Since the offset must always be a multiple of 8,
2599 we use the least significant bit to record
2600 whether we have initialized it already.
2601
2602 When doing a dynamic link, we create a .rela.got
2603 relocation entry to initialize the value. This
2604 is done in the finish_dynamic_symbol routine. */
2605 if ((off & 1) != 0)
2606 off &= ~1;
2607 else
2608 {
2609 bfd_put_64 (output_bfd, relocation,
2610 base_got->contents + off);
2611 /* Note that this is harmless for the GOTPLT64
2612 case, as -1 | 1 still is -1. */
2613 h->got.offset |= 1;
2614 }
2615 }
2616 }
2617
2618 relocation = (base_got->output_section->vma
2619 + base_got->output_offset + off);
2620
2621 goto do_relocation;
2622 }
2623
2624 if (h->plt.offset == (bfd_vma) -1)
2625 {
2626 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2627 if (r_type == htab->pointer_r_type
2628 && (input_section->flags & SEC_CODE) == 0)
2629 goto do_ifunc_pointer;
2630 goto bad_ifunc_reloc;
2631 }
2632
2633 /* STT_GNU_IFUNC symbol must go through PLT. */
2634 if (htab->elf.splt != NULL)
2635 {
2636 if (htab->plt_second != NULL)
2637 {
2638 resolved_plt = htab->plt_second;
2639 plt_offset = eh->plt_second.offset;
2640 }
2641 else
2642 {
2643 resolved_plt = htab->elf.splt;
2644 plt_offset = h->plt.offset;
2645 }
2646 }
2647 else
2648 {
2649 resolved_plt = htab->elf.iplt;
2650 plt_offset = h->plt.offset;
2651 }
2652
2653 relocation = (resolved_plt->output_section->vma
2654 + resolved_plt->output_offset + plt_offset);
2655
2656 switch (r_type)
2657 {
2658 default:
2659 bad_ifunc_reloc:
2660 if (h->root.root.string)
2661 name = h->root.root.string;
2662 else
2663 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2664 NULL);
2665 _bfd_error_handler
2666 /* xgettext:c-format */
2667 (_("%pB: relocation %s against STT_GNU_IFUNC "
2668 "symbol `%s' isn't supported"), input_bfd,
2669 howto->name, name);
2670 bfd_set_error (bfd_error_bad_value);
2671 return FALSE;
2672
2673 case R_X86_64_32S:
2674 if (bfd_link_pic (info))
2675 abort ();
2676 goto do_relocation;
2677
2678 case R_X86_64_32:
2679 if (ABI_64_P (output_bfd))
2680 goto do_relocation;
2681 /* FALLTHROUGH */
2682 case R_X86_64_64:
2683 do_ifunc_pointer:
2684 if (rel->r_addend != 0)
2685 {
2686 if (h->root.root.string)
2687 name = h->root.root.string;
2688 else
2689 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2690 sym, NULL);
2691 _bfd_error_handler
2692 /* xgettext:c-format */
2693 (_("%pB: relocation %s against STT_GNU_IFUNC "
2694 "symbol `%s' has non-zero addend: %" PRId64),
2695 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2696 bfd_set_error (bfd_error_bad_value);
2697 return FALSE;
2698 }
2699
2700 /* Generate a dynamic relocation only when there is a
2701 non-GOT reference in a shared object or there is no
2702 PLT. */
2703 if ((bfd_link_pic (info) && h->non_got_ref)
2704 || h->plt.offset == (bfd_vma) -1)
2705 {
2706 Elf_Internal_Rela outrel;
2707 asection *sreloc;
2708
2709 /* Need a dynamic relocation to get the real function
2710 address. */
2711 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2712 info,
2713 input_section,
2714 rel->r_offset);
2715 if (outrel.r_offset == (bfd_vma) -1
2716 || outrel.r_offset == (bfd_vma) -2)
2717 abort ();
2718
2719 outrel.r_offset += (input_section->output_section->vma
2720 + input_section->output_offset);
2721
2722 if (POINTER_LOCAL_IFUNC_P (info, h))
2723 {
2724 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2725 h->root.root.string,
2726 h->root.u.def.section->owner);
2727
2728 /* This symbol is resolved locally. */
2729 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2730 outrel.r_addend = (h->root.u.def.value
2731 + h->root.u.def.section->output_section->vma
2732 + h->root.u.def.section->output_offset);
2733 }
2734 else
2735 {
2736 outrel.r_info = htab->r_info (h->dynindx, r_type);
2737 outrel.r_addend = 0;
2738 }
2739
2740 /* Dynamic relocations are stored in
2741 1. .rela.ifunc section in PIC object.
2742 2. .rela.got section in dynamic executable.
2743 3. .rela.iplt section in static executable. */
2744 if (bfd_link_pic (info))
2745 sreloc = htab->elf.irelifunc;
2746 else if (htab->elf.splt != NULL)
2747 sreloc = htab->elf.srelgot;
2748 else
2749 sreloc = htab->elf.irelplt;
2750 elf_append_rela (output_bfd, sreloc, &outrel);
2751
2752 /* If this reloc is against an external symbol, we
2753 do not want to fiddle with the addend. Otherwise,
2754 we need to include the symbol value so that it
2755 becomes an addend for the dynamic reloc. For an
2756 internal symbol, we have updated addend. */
2757 continue;
2758 }
2759 /* FALLTHROUGH */
2760 case R_X86_64_PC32:
2761 case R_X86_64_PC32_BND:
2762 case R_X86_64_PC64:
2763 case R_X86_64_PLT32:
2764 case R_X86_64_PLT32_BND:
2765 goto do_relocation;
2766 }
2767 }
2768
2769 skip_ifunc:
2770 resolved_to_zero = (eh != NULL
2771 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2772
2773 /* When generating a shared object, the relocations handled here are
2774 copied into the output file to be resolved at run time. */
2775 switch (r_type)
2776 {
2777 case R_X86_64_GOT32:
2778 case R_X86_64_GOT64:
2779 /* Relocation is to the entry for this symbol in the global
2780 offset table. */
2781 case R_X86_64_GOTPCREL:
2782 case R_X86_64_GOTPCRELX:
2783 case R_X86_64_REX_GOTPCRELX:
2784 case R_X86_64_GOTPCREL64:
2785 /* Use global offset table entry as symbol value. */
2786 case R_X86_64_GOTPLT64:
2787 /* This is obsolete and treated the same as GOT64. */
2788 base_got = htab->elf.sgot;
2789
2790 if (htab->elf.sgot == NULL)
2791 abort ();
2792
2793 relative_reloc = FALSE;
2794 if (h != NULL)
2795 {
2796 off = h->got.offset;
2797 if (h->needs_plt
2798 && h->plt.offset != (bfd_vma)-1
2799 && off == (bfd_vma)-1)
2800 {
2801 /* We can't use h->got.offset here to save
2802 state, or even just remember the offset, as
2803 finish_dynamic_symbol would use that as offset into
2804 .got. */
2805 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2806 - htab->plt.has_plt0);
2807 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2808 base_got = htab->elf.sgotplt;
2809 }
2810
2811 if (RESOLVED_LOCALLY_P (info, h, htab))
2812 {
2813 /* We must initialize this entry in the global offset
2814 table. Since the offset must always be a multiple
2815 of 8, we use the least significant bit to record
2816 whether we have initialized it already.
2817
2818 When doing a dynamic link, we create a .rela.got
2819 relocation entry to initialize the value. This is
2820 done in the finish_dynamic_symbol routine. */
2821 if ((off & 1) != 0)
2822 off &= ~1;
2823 else
2824 {
2825 bfd_put_64 (output_bfd, relocation,
2826 base_got->contents + off);
2827 /* Note that this is harmless for the GOTPLT64 case,
2828 as -1 | 1 still is -1. */
2829 h->got.offset |= 1;
2830
2831 if (GENERATE_RELATIVE_RELOC_P (info, h))
2832 {
2833 /* If this symbol isn't dynamic in PIC,
2834 generate R_X86_64_RELATIVE here. */
2835 eh->no_finish_dynamic_symbol = 1;
2836 relative_reloc = TRUE;
2837 }
2838 }
2839 }
2840 else
2841 unresolved_reloc = FALSE;
2842 }
2843 else
2844 {
2845 if (local_got_offsets == NULL)
2846 abort ();
2847
2848 off = local_got_offsets[r_symndx];
2849
2850 /* The offset must always be a multiple of 8. We use
2851 the least significant bit to record whether we have
2852 already generated the necessary reloc. */
2853 if ((off & 1) != 0)
2854 off &= ~1;
2855 else
2856 {
2857 bfd_put_64 (output_bfd, relocation,
2858 base_got->contents + off);
2859 local_got_offsets[r_symndx] |= 1;
2860
2861 if (bfd_link_pic (info))
2862 relative_reloc = TRUE;
2863 }
2864 }
2865
2866 if (relative_reloc)
2867 {
2868 asection *s;
2869 Elf_Internal_Rela outrel;
2870
2871 /* We need to generate a R_X86_64_RELATIVE reloc
2872 for the dynamic linker. */
2873 s = htab->elf.srelgot;
2874 if (s == NULL)
2875 abort ();
2876
2877 outrel.r_offset = (base_got->output_section->vma
2878 + base_got->output_offset
2879 + off);
2880 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2881 outrel.r_addend = relocation;
2882 elf_append_rela (output_bfd, s, &outrel);
2883 }
2884
2885 if (off >= (bfd_vma) -2)
2886 abort ();
2887
2888 relocation = base_got->output_section->vma
2889 + base_got->output_offset + off;
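/* GOTPCREL* relocations are PC-relative to the GOT entry itself and
keep the entry's absolute address; GOT32/GOT64/GOTPLT64 values are
offsets from the GOT base (.got.plt), so rebase them below. */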
2890 if (r_type != R_X86_64_GOTPCREL
2891 && r_type != R_X86_64_GOTPCRELX
2892 && r_type != R_X86_64_REX_GOTPCRELX
2893 && r_type != R_X86_64_GOTPCREL64)
2894 relocation -= htab->elf.sgotplt->output_section->vma
2895 - htab->elf.sgotplt->output_offset;
2896
2897 break;
2898
2899 case R_X86_64_GOTOFF64:
2900 /* Relocation is relative to the start of the global offset
2901 table. */
2902
2903 /* Check to make sure it isn't a protected function or data
2904 symbol for a shared library since it may not be local when
2905 used as a function address or with copy relocation. We also
2906 need to make sure that the symbol is referenced locally. */
2907 if (bfd_link_pic (info) && h)
2908 {
2909 if (!h->def_regular)
2910 {
2911 const char *v;
2912
2913 switch (ELF_ST_VISIBILITY (h->other))
2914 {
2915 case STV_HIDDEN:
2916 v = _("hidden symbol");
2917 break;
2918 case STV_INTERNAL:
2919 v = _("internal symbol");
2920 break;
2921 case STV_PROTECTED:
2922 v = _("protected symbol");
2923 break;
2924 default:
2925 v = _("symbol");
2926 break;
2927 }
2928
2929 _bfd_error_handler
2930 /* xgettext:c-format */
2931 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2932 " `%s' can not be used when making a shared object"),
2933 input_bfd, v, h->root.root.string);
2934 bfd_set_error (bfd_error_bad_value);
2935 return FALSE;
2936 }
2937 else if (!bfd_link_executable (info)
2938 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2939 && (h->type == STT_FUNC
2940 || h->type == STT_OBJECT)
2941 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2942 {
2943 _bfd_error_handler
2944 /* xgettext:c-format */
2945 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2946 " `%s' can not be used when making a shared object"),
2947 input_bfd,
2948 h->type == STT_FUNC ? "function" : "data",
2949 h->root.root.string);
2950 bfd_set_error (bfd_error_bad_value);
2951 return FALSE;
2952 }
2953 }
2954
2955 /* Note that sgot is not involved in this
2956 calculation. We always want the start of .got.plt. If we
2957 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2958 permitted by the ABI, we might have to change this
2959 calculation. */
2960 relocation -= htab->elf.sgotplt->output_section->vma
2961 + htab->elf.sgotplt->output_offset;
2962 break;
2963
2964 case R_X86_64_GOTPC32:
2965 case R_X86_64_GOTPC64:
2966 /* Use global offset table as symbol value. */
2967 relocation = htab->elf.sgotplt->output_section->vma
2968 + htab->elf.sgotplt->output_offset;
2969 unresolved_reloc = FALSE;
2970 break;
2971
2972 case R_X86_64_PLTOFF64:
2973 /* Relocation is PLT entry relative to GOT. For local
2974 symbols it's the symbol itself relative to GOT. */
2975 if (h != NULL
2976 /* See PLT32 handling. */
2977 && (h->plt.offset != (bfd_vma) -1
2978 || eh->plt_got.offset != (bfd_vma) -1)
2979 && htab->elf.splt != NULL)
2980 {
2981 if (eh->plt_got.offset != (bfd_vma) -1)
2982 {
2983 /* Use the GOT PLT. */
2984 resolved_plt = htab->plt_got;
2985 plt_offset = eh->plt_got.offset;
2986 }
2987 else if (htab->plt_second != NULL)
2988 {
2989 resolved_plt = htab->plt_second;
2990 plt_offset = eh->plt_second.offset;
2991 }
2992 else
2993 {
2994 resolved_plt = htab->elf.splt;
2995 plt_offset = h->plt.offset;
2996 }
2997
2998 relocation = (resolved_plt->output_section->vma
2999 + resolved_plt->output_offset
3000 + plt_offset);
3001 unresolved_reloc = FALSE;
3002 }
3003
3004 relocation -= htab->elf.sgotplt->output_section->vma
3005 + htab->elf.sgotplt->output_offset;
3006 break;
3007
3008 case R_X86_64_PLT32:
3009 case R_X86_64_PLT32_BND:
3010 /* Relocation is to the entry for this symbol in the
3011 procedure linkage table. */
3012
3013 /* Resolve a PLT32 reloc against a local symbol directly,
3014 without using the procedure linkage table. */
3015 if (h == NULL)
3016 break;
3017
3018 if ((h->plt.offset == (bfd_vma) -1
3019 && eh->plt_got.offset == (bfd_vma) -1)
3020 || htab->elf.splt == NULL)
3021 {
3022 /* We didn't make a PLT entry for this symbol. This
3023 happens when statically linking PIC code, or when
3024 using -Bsymbolic. */
3025 break;
3026 }
3027
3028 use_plt:
3029 if (h->plt.offset != (bfd_vma) -1)
3030 {
3031 if (htab->plt_second != NULL)
3032 {
3033 resolved_plt = htab->plt_second;
3034 plt_offset = eh->plt_second.offset;
3035 }
3036 else
3037 {
3038 resolved_plt = htab->elf.splt;
3039 plt_offset = h->plt.offset;
3040 }
3041 }
3042 else
3043 {
3044 /* Use the GOT PLT. */
3045 resolved_plt = htab->plt_got;
3046 plt_offset = eh->plt_got.offset;
3047 }
3048
3049 relocation = (resolved_plt->output_section->vma
3050 + resolved_plt->output_offset
3051 + plt_offset);
3052 unresolved_reloc = FALSE;
3053 break;
3054
3055 case R_X86_64_SIZE32:
3056 case R_X86_64_SIZE64:
3057 /* Set to symbol size. */
3058 relocation = st_size;
3059 goto direct;
3060
3061 case R_X86_64_PC8:
3062 case R_X86_64_PC16:
3063 case R_X86_64_PC32:
3064 case R_X86_64_PC32_BND:
3065 /* Don't complain about -fPIC if the symbol is undefined when
3066 building an executable, unless it is an unresolved weak symbol,
3067 references a dynamic definition in a PIE, or -z nocopyreloc
3068 is used. */
3069 if ((input_section->flags & SEC_ALLOC) != 0
3070 && (input_section->flags & SEC_READONLY) != 0
3071 && h != NULL
3072 && ((bfd_link_executable (info)
3073 && ((h->root.type == bfd_link_hash_undefweak
3074 && !resolved_to_zero)
3075 || (bfd_link_pie (info)
3076 && !h->def_regular
3077 && h->def_dynamic)
3078 || ((info->nocopyreloc
3079 || (eh->def_protected
3080 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3081 && h->def_dynamic
3082 && !(h->root.u.def.section->flags & SEC_CODE))))
3083 || bfd_link_dll (info)))
3084 {
3085 bfd_boolean fail = FALSE;
3086 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3087 {
3088 /* Symbol is referenced locally. Make sure it is
3089 defined locally. */
3090 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3091 }
3092 else if (!(bfd_link_pie (info)
3093 && (h->needs_copy || eh->needs_copy)))
3094 {
3095 /* Symbol doesn't need copy reloc and isn't referenced
3096 locally. Address of protected function may not be
3097 reachable at run-time. */
3098 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3099 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3100 && h->type == STT_FUNC));
3101 }
3102
3103 if (fail)
3104 return elf_x86_64_need_pic (info, input_bfd, input_section,
3105 h, NULL, NULL, howto);
3106 }
3107 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3108 as function address. */
3109 else if (h != NULL
3110 && (input_section->flags & SEC_CODE) == 0
3111 && bfd_link_pie (info)
3112 && h->type == STT_FUNC
3113 && !h->def_regular
3114 && h->def_dynamic)
3115 goto use_plt;
3116 /* Fall through. */
3117
3118 case R_X86_64_8:
3119 case R_X86_64_16:
3120 case R_X86_64_32:
3121 case R_X86_64_PC64:
3122 case R_X86_64_64:
3123 /* FIXME: The ABI says the linker should make sure the value is
3124 the same when it's zero-extended to 64 bits. */
3125
3126 direct:
3127 if ((input_section->flags & SEC_ALLOC) == 0)
3128 break;
3129
3130 need_copy_reloc_in_pie = (bfd_link_pie (info)
3131 && h != NULL
3132 && (h->needs_copy
3133 || eh->needs_copy
3134 || (h->root.type
3135 == bfd_link_hash_undefined))
3136 && (X86_PCREL_TYPE_P (r_type)
3137 || X86_SIZE_TYPE_P (r_type)));
3138
3139 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3140 need_copy_reloc_in_pie,
3141 resolved_to_zero, FALSE))
3142 {
3143 Elf_Internal_Rela outrel;
3144 bfd_boolean skip, relocate;
3145 asection *sreloc;
3146
3147 /* When generating a shared object, these relocations
3148 are copied into the output file to be resolved at run
3149 time. */
3150 skip = FALSE;
3151 relocate = FALSE;
3152
3153 outrel.r_offset =
3154 _bfd_elf_section_offset (output_bfd, info, input_section,
3155 rel->r_offset);
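/* A result of -1 means the dynamic relocation should be dropped
entirely; -2 means it is dropped but the field must still be
relocated statically (this happens, for instance, for edited
.eh_frame contents). */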
3156 if (outrel.r_offset == (bfd_vma) -1)
3157 skip = TRUE;
3158 else if (outrel.r_offset == (bfd_vma) -2)
3159 skip = TRUE, relocate = TRUE;
3160
3161 outrel.r_offset += (input_section->output_section->vma
3162 + input_section->output_offset);
3163
3164 if (skip)
3165 memset (&outrel, 0, sizeof outrel);
3166
3167 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3168 {
3169 outrel.r_info = htab->r_info (h->dynindx, r_type);
3170 outrel.r_addend = rel->r_addend;
3171 }
3172 else
3173 {
3174 /* This symbol is local, or marked to become local.
3175 When relocation overflow check is disabled, we
3176 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3177 if (r_type == htab->pointer_r_type
3178 || (r_type == R_X86_64_32
3179 && info->no_reloc_overflow_check))
3180 {
3181 relocate = TRUE;
3182 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3183 outrel.r_addend = relocation + rel->r_addend;
3184 }
3185 else if (r_type == R_X86_64_64
3186 && !ABI_64_P (output_bfd))
3187 {
3188 relocate = TRUE;
3189 outrel.r_info = htab->r_info (0,
3190 R_X86_64_RELATIVE64);
3191 outrel.r_addend = relocation + rel->r_addend;
3192 /* Check addend overflow. */
3193 if ((outrel.r_addend & 0x80000000)
3194 != (rel->r_addend & 0x80000000))
3195 {
3196 const char *name;
3197 int addend = rel->r_addend;
3198 if (h && h->root.root.string)
3199 name = h->root.root.string;
3200 else
3201 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3202 sym, NULL);
3203 _bfd_error_handler
3204 /* xgettext:c-format */
3205 (_("%pB: addend %s%#x in relocation %s against "
3206 "symbol `%s' at %#" PRIx64
3207 " in section `%pA' is out of range"),
3208 input_bfd, addend < 0 ? "-" : "", addend,
3209 howto->name, name, (uint64_t) rel->r_offset,
3210 input_section);
3211 bfd_set_error (bfd_error_bad_value);
3212 return FALSE;
3213 }
3214 }
3215 else
3216 {
3217 long sindx;
3218
3219 if (bfd_is_abs_section (sec))
3220 sindx = 0;
3221 else if (sec == NULL || sec->owner == NULL)
3222 {
3223 bfd_set_error (bfd_error_bad_value);
3224 return FALSE;
3225 }
3226 else
3227 {
3228 asection *osec;
3229
3230 /* We are turning this relocation into one
3231 against a section symbol. It would be
3232 proper to subtract the symbol's value,
3233 osec->vma, from the emitted reloc addend,
3234 but ld.so expects buggy relocs. */
3235 osec = sec->output_section;
3236 sindx = elf_section_data (osec)->dynindx;
3237 if (sindx == 0)
3238 {
3239 asection *oi = htab->elf.text_index_section;
3240 sindx = elf_section_data (oi)->dynindx;
3241 }
3242 BFD_ASSERT (sindx != 0);
3243 }
3244
3245 outrel.r_info = htab->r_info (sindx, r_type);
3246 outrel.r_addend = relocation + rel->r_addend;
3247 }
3248 }
3249
3250 sreloc = elf_section_data (input_section)->sreloc;
3251
3252 if (sreloc == NULL || sreloc->contents == NULL)
3253 {
3254 r = bfd_reloc_notsupported;
3255 goto check_relocation_error;
3256 }
3257
3258 elf_append_rela (output_bfd, sreloc, &outrel);
3259
3260 /* If this reloc is against an external symbol, we do
3261 not want to fiddle with the addend. Otherwise, we
3262 need to include the symbol value so that it becomes
3263 an addend for the dynamic reloc. */
3264 if (! relocate)
3265 continue;
3266 }
3267
3268 break;
3269
3270 case R_X86_64_TLSGD:
3271 case R_X86_64_GOTPC32_TLSDESC:
3272 case R_X86_64_TLSDESC_CALL:
3273 case R_X86_64_GOTTPOFF:
3274 tls_type = GOT_UNKNOWN;
3275 if (h == NULL && local_got_offsets)
3276 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3277 else if (h != NULL)
3278 tls_type = elf_x86_hash_entry (h)->tls_type;
3279
3280 r_type_tls = r_type;
3281 if (! elf_x86_64_tls_transition (info, input_bfd,
3282 input_section, contents,
3283 symtab_hdr, sym_hashes,
3284 &r_type_tls, tls_type, rel,
3285 relend, h, r_symndx, TRUE))
3286 return FALSE;
3287
3288 if (r_type_tls == R_X86_64_TPOFF32)
3289 {
3290 bfd_vma roff = rel->r_offset;
3291
3292 BFD_ASSERT (! unresolved_reloc);
3293
3294 if (r_type == R_X86_64_TLSGD)
3295 {
3296 /* GD->LE transition. For 64bit, change
3297 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3298 .word 0x6666; rex64; call __tls_get_addr@PLT
3299 or
3300 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3301 .byte 0x66; rex64
3302 call *__tls_get_addr@GOTPCREL(%rip)
3303 which may be converted to
3304 addr32 call __tls_get_addr
3305 into:
3306 movq %fs:0, %rax
3307 leaq foo@tpoff(%rax), %rax
3308 For 32bit, change
3309 leaq foo@tlsgd(%rip), %rdi
3310 .word 0x6666; rex64; call __tls_get_addr@PLT
3311 or
3312 leaq foo@tlsgd(%rip), %rdi
3313 .byte 0x66; rex64
3314 call *__tls_get_addr@GOTPCREL(%rip)
3315 which may be converted to
3316 addr32 call __tls_get_addr
3317 into:
3318 movl %fs:0, %eax
3319 leaq foo@tpoff(%rax), %rax
3320 For largepic, change:
3321 leaq foo@tlsgd(%rip), %rdi
3322 movabsq $__tls_get_addr@pltoff, %rax
3323 addq %r15, %rax
3324 call *%rax
3325 into:
3326 movq %fs:0, %rax
3327 leaq foo@tpoff(%rax), %rax
3328 nopw 0x0(%rax,%rax,1) */
3329 int largepic = 0;
3330 if (ABI_64_P (output_bfd))
3331 {
3332 if (contents[roff + 5] == 0xb8)
3333 {
3334 memcpy (contents + roff - 3,
3335 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3336 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3337 largepic = 1;
3338 }
3339 else
3340 memcpy (contents + roff - 4,
3341 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3342 16);
3343 }
3344 else
3345 memcpy (contents + roff - 3,
3346 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3347 15);
3348 bfd_put_32 (output_bfd,
3349 elf_x86_64_tpoff (info, relocation),
3350 contents + roff + 8 + largepic);
3351 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3352 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3353 rel++;
3354 wrel++;
3355 continue;
3356 }
3357 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3358 {
3359 /* GDesc -> LE transition.
3360 It's originally something like:
3361 leaq x@tlsdesc(%rip), %rax
3362
3363 Change it to:
3364 movl $x@tpoff, %rax. */
3365
3366 unsigned int val, type;
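/* type is the REX prefix of the original leaq and val its ModRM
byte. "movl/movq $imm32, %reg" is C7 /0, so the REX.R bit moves
down to REX.B and the ModRM reg field moves to r/m with mod = 11. */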
3367
3368 type = bfd_get_8 (input_bfd, contents + roff - 3);
3369 val = bfd_get_8 (input_bfd, contents + roff - 1);
3370 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3371 contents + roff - 3);
3372 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3373 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3374 contents + roff - 1);
3375 bfd_put_32 (output_bfd,
3376 elf_x86_64_tpoff (info, relocation),
3377 contents + roff);
3378 continue;
3379 }
3380 else if (r_type == R_X86_64_TLSDESC_CALL)
3381 {
3382 /* GDesc -> LE transition.
3383 It's originally:
3384 call *(%rax)
3385 Turn it into:
3386 xchg %ax,%ax. */
3387 bfd_put_8 (output_bfd, 0x66, contents + roff);
3388 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3389 continue;
3390 }
3391 else if (r_type == R_X86_64_GOTTPOFF)
3392 {
3393 /* IE->LE transition:
3394 For 64bit, originally it can be one of:
3395 movq foo@gottpoff(%rip), %reg
3396 addq foo@gottpoff(%rip), %reg
3397 We change it into:
3398 movq $foo, %reg
3399 leaq foo(%reg), %reg
3400 addq $foo, %reg.
3401 For 32bit, originally it can be one of:
3402 movq foo@gottpoff(%rip), %reg
3403 addl foo@gottpoff(%rip), %reg
3404 We change it into:
3405 movq $foo, %reg
3406 leal foo(%reg), %reg
3407 addl $foo, %reg. */
3408
3409 unsigned int val, type, reg;
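/* val is the REX prefix (if present), type the opcode byte (0x8b
for mov, otherwise add), and reg the ModRM byte; the shift below
leaves just the destination register from the ModRM reg field,
since mod is 00 for the RIP-relative form. */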
3410
3411 if (roff >= 3)
3412 val = bfd_get_8 (input_bfd, contents + roff - 3);
3413 else
3414 val = 0;
3415 type = bfd_get_8 (input_bfd, contents + roff - 2);
3416 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3417 reg >>= 3;
3418 if (type == 0x8b)
3419 {
3420 /* movq */
3421 if (val == 0x4c)
3422 bfd_put_8 (output_bfd, 0x49,
3423 contents + roff - 3);
3424 else if (!ABI_64_P (output_bfd) && val == 0x44)
3425 bfd_put_8 (output_bfd, 0x41,
3426 contents + roff - 3);
3427 bfd_put_8 (output_bfd, 0xc7,
3428 contents + roff - 2);
3429 bfd_put_8 (output_bfd, 0xc0 | reg,
3430 contents + roff - 1);
3431 }
3432 else if (reg == 4)
3433 {
3434 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3435 is special */
3436 if (val == 0x4c)
3437 bfd_put_8 (output_bfd, 0x49,
3438 contents + roff - 3);
3439 else if (!ABI_64_P (output_bfd) && val == 0x44)
3440 bfd_put_8 (output_bfd, 0x41,
3441 contents + roff - 3);
3442 bfd_put_8 (output_bfd, 0x81,
3443 contents + roff - 2);
3444 bfd_put_8 (output_bfd, 0xc0 | reg,
3445 contents + roff - 1);
3446 }
3447 else
3448 {
3449 /* addq/addl -> leaq/leal */
3450 if (val == 0x4c)
3451 bfd_put_8 (output_bfd, 0x4d,
3452 contents + roff - 3);
3453 else if (!ABI_64_P (output_bfd) && val == 0x44)
3454 bfd_put_8 (output_bfd, 0x45,
3455 contents + roff - 3);
3456 bfd_put_8 (output_bfd, 0x8d,
3457 contents + roff - 2);
3458 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3459 contents + roff - 1);
3460 }
3461 bfd_put_32 (output_bfd,
3462 elf_x86_64_tpoff (info, relocation),
3463 contents + roff);
3464 continue;
3465 }
3466 else
3467 BFD_ASSERT (FALSE);
3468 }
3469
3470 if (htab->elf.sgot == NULL)
3471 abort ();
3472
3473 if (h != NULL)
3474 {
3475 off = h->got.offset;
3476 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3477 }
3478 else
3479 {
3480 if (local_got_offsets == NULL)
3481 abort ();
3482
3483 off = local_got_offsets[r_symndx];
3484 offplt = local_tlsdesc_gotents[r_symndx];
3485 }
3486
3487 if ((off & 1) != 0)
3488 off &= ~1;
3489 else
3490 {
3491 Elf_Internal_Rela outrel;
3492 int dr_type, indx;
3493 asection *sreloc;
3494
3495 if (htab->elf.srelgot == NULL)
3496 abort ();
3497
3498 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3499
3500 if (GOT_TLS_GDESC_P (tls_type))
3501 {
3502 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3503 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3504 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3505 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3506 + htab->elf.sgotplt->output_offset
3507 + offplt
3508 + htab->sgotplt_jump_table_size);
3509 sreloc = htab->elf.srelplt;
3510 if (indx == 0)
3511 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3512 else
3513 outrel.r_addend = 0;
3514 elf_append_rela (output_bfd, sreloc, &outrel);
3515 }
3516
3517 sreloc = htab->elf.srelgot;
3518
3519 outrel.r_offset = (htab->elf.sgot->output_section->vma
3520 + htab->elf.sgot->output_offset + off);
3521
3522 if (GOT_TLS_GD_P (tls_type))
3523 dr_type = R_X86_64_DTPMOD64;
3524 else if (GOT_TLS_GDESC_P (tls_type))
3525 goto dr_done;
3526 else
3527 dr_type = R_X86_64_TPOFF64;
3528
3529 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3530 outrel.r_addend = 0;
3531 if ((dr_type == R_X86_64_TPOFF64
3532 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3533 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3534 outrel.r_info = htab->r_info (indx, dr_type);
3535
3536 elf_append_rela (output_bfd, sreloc, &outrel);
3537
3538 if (GOT_TLS_GD_P (tls_type))
3539 {
3540 if (indx == 0)
3541 {
3542 BFD_ASSERT (! unresolved_reloc);
3543 bfd_put_64 (output_bfd,
3544 relocation - _bfd_x86_elf_dtpoff_base (info),
3545 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3546 }
3547 else
3548 {
3549 bfd_put_64 (output_bfd, 0,
3550 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3551 outrel.r_info = htab->r_info (indx,
3552 R_X86_64_DTPOFF64);
3553 outrel.r_offset += GOT_ENTRY_SIZE;
3554 elf_append_rela (output_bfd, sreloc,
3555 &outrel);
3556 }
3557 }
3558
3559 dr_done:
3560 if (h != NULL)
3561 h->got.offset |= 1;
3562 else
3563 local_got_offsets[r_symndx] |= 1;
3564 }
3565
3566 if (off >= (bfd_vma) -2
3567 && ! GOT_TLS_GDESC_P (tls_type))
3568 abort ();
3569 if (r_type_tls == r_type)
3570 {
3571 if (r_type == R_X86_64_GOTPC32_TLSDESC
3572 || r_type == R_X86_64_TLSDESC_CALL)
3573 relocation = htab->elf.sgotplt->output_section->vma
3574 + htab->elf.sgotplt->output_offset
3575 + offplt + htab->sgotplt_jump_table_size;
3576 else
3577 relocation = htab->elf.sgot->output_section->vma
3578 + htab->elf.sgot->output_offset + off;
3579 unresolved_reloc = FALSE;
3580 }
3581 else
3582 {
3583 bfd_vma roff = rel->r_offset;
3584
3585 if (r_type == R_X86_64_TLSGD)
3586 {
3587 /* GD->IE transition. For 64bit, change
3588 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3589 .word 0x6666; rex64; call __tls_get_addr@PLT
3590 or
3591 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3592 .byte 0x66; rex64
3593 call *__tls_get_addr@GOTPCREL(%rip)
3594 which may be converted to
3595 addr32 call __tls_get_addr
3596 into:
3597 movq %fs:0, %rax
3598 addq foo@gottpoff(%rip), %rax
3599 For 32bit, change
3600 leaq foo@tlsgd(%rip), %rdi
3601 .word 0x6666; rex64; call __tls_get_addr@PLT
3602 or
3603 leaq foo@tlsgd(%rip), %rdi
3604 .byte 0x66; rex64;
3605 call *__tls_get_addr@GOTPCREL(%rip)
3606 which may be converted to
3607 addr32 call __tls_get_addr
3608 into:
3609 movl %fs:0, %eax
3610 addq foo@gottpoff(%rip), %rax
3611 For largepic, change:
3612 leaq foo@tlsgd(%rip), %rdi
3613 movabsq $__tls_get_addr@pltoff, %rax
3614 addq %r15, %rax
3615 call *%rax
3616 into:
3617 movq %fs:0, %rax
3618 addq foo@gottpoff(%rax), %rax
3619 nopw 0x0(%rax,%rax,1) */
3620 int largepic = 0;
3621 if (ABI_64_P (output_bfd))
3622 {
3623 if (contents[roff + 5] == 0xb8)
3624 {
3625 memcpy (contents + roff - 3,
3626 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3627 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3628 largepic = 1;
3629 }
3630 else
3631 memcpy (contents + roff - 4,
3632 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3633 16);
3634 }
3635 else
3636 memcpy (contents + roff - 3,
3637 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3638 15);
3639
3640 relocation = (htab->elf.sgot->output_section->vma
3641 + htab->elf.sgot->output_offset + off
3642 - roff
3643 - largepic
3644 - input_section->output_section->vma
3645 - input_section->output_offset
3646 - 12);
3647 bfd_put_32 (output_bfd, relocation,
3648 contents + roff + 8 + largepic);
3649 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3650 rel++;
3651 wrel++;
3652 continue;
3653 }
3654 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3655 {
3656 /* GDesc -> IE transition.
3657 It's originally something like:
3658 leaq x@tlsdesc(%rip), %rax
3659
3660 Change it to:
3661 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3662
3663 /* Now modify the instruction as appropriate. To
3664 turn a leaq into a movq in the form we use it, it
3665 suffices to change the second byte from 0x8d to
3666 0x8b. */
3667 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3668
3669 bfd_put_32 (output_bfd,
3670 htab->elf.sgot->output_section->vma
3671 + htab->elf.sgot->output_offset + off
3672 - rel->r_offset
3673 - input_section->output_section->vma
3674 - input_section->output_offset
3675 - 4,
3676 contents + roff);
3677 continue;
3678 }
3679 else if (r_type == R_X86_64_TLSDESC_CALL)
3680 {
3681 /* GDesc -> IE transition.
3682 It's originally:
3683 call *(%rax)
3684
3685 Change it to:
3686 xchg %ax, %ax. */
3687
3688 bfd_put_8 (output_bfd, 0x66, contents + roff);
3689 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3690 continue;
3691 }
3692 else
3693 BFD_ASSERT (FALSE);
3694 }
3695 break;
3696
3697 case R_X86_64_TLSLD:
3698 if (! elf_x86_64_tls_transition (info, input_bfd,
3699 input_section, contents,
3700 symtab_hdr, sym_hashes,
3701 &r_type, GOT_UNKNOWN, rel,
3702 relend, h, r_symndx, TRUE))
3703 return FALSE;
3704
3705 if (r_type != R_X86_64_TLSLD)
3706 {
3707 /* LD->LE transition:
3708 leaq foo@tlsld(%rip), %rdi
3709 call __tls_get_addr@PLT
3710 For 64bit, we change it into:
3711 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3712 For 32bit, we change it into:
3713 nopl 0x0(%rax); movl %fs:0, %eax
3714 Or
3715 leaq foo@tlsld(%rip), %rdi;
3716 call *__tls_get_addr@GOTPCREL(%rip)
3717 which may be converted to
3718 addr32 call __tls_get_addr
3719 For 64bit, we change it into:
3720 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3721 For 32bit, we change it into:
3722 nopw 0x0(%rax); movl %fs:0, %eax
3723 For largepic, change:
3724 		leaq foo@tlsld(%rip), %rdi
3725 movabsq $__tls_get_addr@pltoff, %rax
3726 addq %rbx, %rax
3727 call *%rax
3728 into
3729 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3730 		movq %fs:0, %rax */
3731
3732 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3733 if (ABI_64_P (output_bfd))
3734 {
3735 if (contents[rel->r_offset + 5] == 0xb8)
3736 memcpy (contents + rel->r_offset - 3,
3737 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3738 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3739 else if (contents[rel->r_offset + 4] == 0xff
3740 || contents[rel->r_offset + 4] == 0x67)
3741 memcpy (contents + rel->r_offset - 3,
3742 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3743 13);
3744 else
3745 memcpy (contents + rel->r_offset - 3,
3746 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3747 }
3748 else
3749 {
3750 if (contents[rel->r_offset + 4] == 0xff)
3751 memcpy (contents + rel->r_offset - 3,
3752 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3753 13);
3754 else
3755 memcpy (contents + rel->r_offset - 3,
3756 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3757 }
3758 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3759 and R_X86_64_PLTOFF64. */
3760 rel++;
3761 wrel++;
3762 continue;
3763 }
3764
3765 if (htab->elf.sgot == NULL)
3766 abort ();
3767
3768 off = htab->tls_ld_or_ldm_got.offset;
3769 if (off & 1)
3770 off &= ~1;
3771 else
3772 {
3773 Elf_Internal_Rela outrel;
3774
3775 if (htab->elf.srelgot == NULL)
3776 abort ();
3777
3778 outrel.r_offset = (htab->elf.sgot->output_section->vma
3779 + htab->elf.sgot->output_offset + off);
3780
3781 bfd_put_64 (output_bfd, 0,
3782 htab->elf.sgot->contents + off);
3783 bfd_put_64 (output_bfd, 0,
3784 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3785 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3786 outrel.r_addend = 0;
3787 elf_append_rela (output_bfd, htab->elf.srelgot,
3788 &outrel);
3789 htab->tls_ld_or_ldm_got.offset |= 1;
3790 }
3791 relocation = htab->elf.sgot->output_section->vma
3792 + htab->elf.sgot->output_offset + off;
3793 unresolved_reloc = FALSE;
3794 break;
3795
3796 case R_X86_64_DTPOFF32:
3797 if (!bfd_link_executable (info)
3798 || (input_section->flags & SEC_CODE) == 0)
3799 relocation -= _bfd_x86_elf_dtpoff_base (info);
3800 else
3801 relocation = elf_x86_64_tpoff (info, relocation);
3802 break;
3803
3804 case R_X86_64_TPOFF32:
3805 case R_X86_64_TPOFF64:
3806 BFD_ASSERT (bfd_link_executable (info));
3807 relocation = elf_x86_64_tpoff (info, relocation);
3808 break;
3809
3810 case R_X86_64_DTPOFF64:
3811 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3812 relocation -= _bfd_x86_elf_dtpoff_base (info);
3813 break;
3814
3815 default:
3816 break;
3817 }
3818
3819 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3820 because such sections are not SEC_ALLOC and thus ld.so will
3821 not process them. */
3822 if (unresolved_reloc
3823 && !((input_section->flags & SEC_DEBUGGING) != 0
3824 && h->def_dynamic)
3825 && _bfd_elf_section_offset (output_bfd, info, input_section,
3826 rel->r_offset) != (bfd_vma) -1)
3827 {
3828 switch (r_type)
3829 {
3830 case R_X86_64_32S:
3831 sec = h->root.u.def.section;
3832 if ((info->nocopyreloc
3833 || (eh->def_protected
3834 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3835 && !(h->root.u.def.section->flags & SEC_CODE))
3836 return elf_x86_64_need_pic (info, input_bfd, input_section,
3837 h, NULL, NULL, howto);
3838 /* Fall through. */
3839
3840 default:
3841 _bfd_error_handler
3842 /* xgettext:c-format */
3843 (_("%pB(%pA+%#" PRIx64 "): "
3844 "unresolvable %s relocation against symbol `%s'"),
3845 input_bfd,
3846 input_section,
3847 (uint64_t) rel->r_offset,
3848 howto->name,
3849 h->root.root.string);
3850 return FALSE;
3851 }
3852 }
3853
3854 do_relocation:
3855 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3856 contents, rel->r_offset,
3857 relocation, rel->r_addend);
3858
3859 check_relocation_error:
3860 if (r != bfd_reloc_ok)
3861 {
3862 const char *name;
3863
3864 if (h != NULL)
3865 name = h->root.root.string;
3866 else
3867 {
3868 name = bfd_elf_string_from_elf_section (input_bfd,
3869 symtab_hdr->sh_link,
3870 sym->st_name);
3871 if (name == NULL)
3872 return FALSE;
3873 if (*name == '\0')
3874 name = bfd_section_name (input_bfd, sec);
3875 }
3876
3877 if (r == bfd_reloc_overflow)
3878 {
3879 if (converted_reloc)
3880 {
3881 info->callbacks->einfo
3882 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3883 return FALSE;
3884 }
3885 (*info->callbacks->reloc_overflow)
3886 (info, (h ? &h->root : NULL), name, howto->name,
3887 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3888 }
3889 else
3890 {
3891 _bfd_error_handler
3892 /* xgettext:c-format */
3893 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3894 input_bfd, input_section,
3895 (uint64_t) rel->r_offset, name, (int) r);
3896 return FALSE;
3897 }
3898 }
3899
3900 if (wrel != rel)
3901 *wrel = *rel;
3902 }
3903
3904 if (wrel != rel)
3905 {
3906 Elf_Internal_Shdr *rel_hdr;
3907 size_t deleted = rel - wrel;
3908
3909 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3910 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3911 if (rel_hdr->sh_size == 0)
3912 {
3913 /* It is too late to remove an empty reloc section. Leave
3914 one NONE reloc.
3915 ??? What is wrong with an empty section??? */
3916 rel_hdr->sh_size = rel_hdr->sh_entsize;
3917 deleted -= 1;
3918 }
3919 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3920 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3921 input_section->reloc_count -= deleted;
3922 }
3923
3924 return TRUE;
3925 }
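
/* A minimal sketch (not part of the linker proper; parameter names are
   hypothetical) of the pattern used by the TLS rewrites and PLT fix-ups in
   this file: each 32-bit PC-relative field is filled with the target address
   minus the run-time address of the end of the instruction holding the
   field, since the CPU adds the displacement to the address of the next
   instruction.  */
#if 0
static bfd_vma
pcrel32_field_value (bfd_vma target,	     /* E.g. a GOT entry address.  */
		     bfd_vma insn_end_addr)  /* Address of the byte following
						the instruction.  */
{
  return target - insn_end_addr;
}
#endif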
3926
3927 /* Finish up dynamic symbol handling. We set the contents of various
3928 dynamic sections here. */
3929
3930 static bfd_boolean
3931 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3932 struct bfd_link_info *info,
3933 struct elf_link_hash_entry *h,
3934 Elf_Internal_Sym *sym)
3935 {
3936 struct elf_x86_link_hash_table *htab;
3937 bfd_boolean use_plt_second;
3938 struct elf_x86_link_hash_entry *eh;
3939 bfd_boolean local_undefweak;
3940
3941 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3942 if (htab == NULL)
3943 return FALSE;
3944
3945   /* Use the second PLT section only if there is a .plt section.  */
3946 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3947
3948 eh = (struct elf_x86_link_hash_entry *) h;
3949 if (eh->no_finish_dynamic_symbol)
3950 abort ();
3951
3952 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3953      resolved undefined weak symbols in an executable so that their
3954 references have value 0 at run-time. */
3955 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3956
3957 if (h->plt.offset != (bfd_vma) -1)
3958 {
3959 bfd_vma plt_index;
3960 bfd_vma got_offset, plt_offset;
3961 Elf_Internal_Rela rela;
3962 bfd_byte *loc;
3963 asection *plt, *gotplt, *relplt, *resolved_plt;
3964 const struct elf_backend_data *bed;
3965 bfd_vma plt_got_pcrel_offset;
3966
3967 /* When building a static executable, use .iplt, .igot.plt and
3968 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3969 if (htab->elf.splt != NULL)
3970 {
3971 plt = htab->elf.splt;
3972 gotplt = htab->elf.sgotplt;
3973 relplt = htab->elf.srelplt;
3974 }
3975 else
3976 {
3977 plt = htab->elf.iplt;
3978 gotplt = htab->elf.igotplt;
3979 relplt = htab->elf.irelplt;
3980 }
3981
3982 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3983
3984 /* Get the index in the procedure linkage table which
3985 corresponds to this symbol. This is the index of this symbol
3986 in all the symbols for which we are making plt entries. The
3987 first entry in the procedure linkage table is reserved.
3988
3989 Get the offset into the .got table of the entry that
3990 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3991 bytes. The first three are reserved for the dynamic linker.
3992
3993 For static executables, we don't reserve anything. */
3994
3995 if (plt == htab->elf.splt)
3996 {
3997 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3998 - htab->plt.has_plt0);
3999 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4000 }
4001 else
4002 {
4003 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4004 got_offset = got_offset * GOT_ENTRY_SIZE;
4005 }
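	  /* For example (a sketch assuming the standard 16-byte lazy PLT
	     with PLT0 and 8-byte GOT entries): the symbol occupying the
	     second real PLT slot has h->plt.offset == 32, giving
	     got_offset == (32 / 16 - 1 + 3) * 8 == 32, i.e. the fifth
	     8-byte .got.plt slot, after the three reserved slots and the
	     slot of the first PLT symbol.  */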
4006
4007 /* Fill in the entry in the procedure linkage table. */
4008 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4009 htab->plt.plt_entry_size);
4010 if (use_plt_second)
4011 {
4012 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4013 htab->non_lazy_plt->plt_entry,
4014 htab->non_lazy_plt->plt_entry_size);
4015
4016 resolved_plt = htab->plt_second;
4017 plt_offset = eh->plt_second.offset;
4018 }
4019 else
4020 {
4021 resolved_plt = plt;
4022 plt_offset = h->plt.offset;
4023 }
4024
4025 /* Insert the relocation positions of the plt section. */
4026
4027       /* Put in the offset for the PC-relative instruction referring to
4028 	 the GOT entry, subtracting the size of that instruction.  */
4029 plt_got_pcrel_offset = (gotplt->output_section->vma
4030 + gotplt->output_offset
4031 + got_offset
4032 - resolved_plt->output_section->vma
4033 - resolved_plt->output_offset
4034 - plt_offset
4035 - htab->plt.plt_got_insn_size);
4036
4037 /* Check PC-relative offset overflow in PLT entry. */
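	  /* (The + 0x80000000 bias maps the valid signed 32-bit range onto
	     [0, 0xffffffff], so the test rejects any displacement that does
	     not fit in a sign-extended 32-bit field.)  */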
4038 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4039 /* xgettext:c-format */
4040 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4041 output_bfd, h->root.root.string);
4042
4043 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4044 (resolved_plt->contents + plt_offset
4045 + htab->plt.plt_got_offset));
4046
4047       /* Fill in the entry in the global offset table; initially this
4048 	 points to the second part of the PLT entry.  Leave the entry
4049 	 as zero for an undefined weak symbol in PIE, since no PLT
4050 	 relocation is generated against such a symbol.  */
4051 if (!local_undefweak)
4052 {
4053 if (htab->plt.has_plt0)
4054 bfd_put_64 (output_bfd, (plt->output_section->vma
4055 + plt->output_offset
4056 + h->plt.offset
4057 + htab->lazy_plt->plt_lazy_offset),
4058 gotplt->contents + got_offset);
4059
4060 /* Fill in the entry in the .rela.plt section. */
4061 rela.r_offset = (gotplt->output_section->vma
4062 + gotplt->output_offset
4063 + got_offset);
4064 if (PLT_LOCAL_IFUNC_P (info, h))
4065 {
4066 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4067 h->root.root.string,
4068 h->root.u.def.section->owner);
4069
4070 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4071 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4072 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4073 rela.r_addend = (h->root.u.def.value
4074 + h->root.u.def.section->output_section->vma
4075 + h->root.u.def.section->output_offset);
4076 /* R_X86_64_IRELATIVE comes last. */
4077 plt_index = htab->next_irelative_index--;
4078 }
4079 else
4080 {
4081 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4082 rela.r_addend = 0;
4083 plt_index = htab->next_jump_slot_index++;
4084 }
4085
4086 	  /* Don't fill the second and third slots of the PLT entry for
4087 	     static executables, nor when there is no PLT0.  */
4088 if (plt == htab->elf.splt && htab->plt.has_plt0)
4089 {
4090 bfd_vma plt0_offset
4091 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4092
4093 /* Put relocation index. */
4094 bfd_put_32 (output_bfd, plt_index,
4095 (plt->contents + h->plt.offset
4096 + htab->lazy_plt->plt_reloc_offset));
4097
4098 /* Put offset for jmp .PLT0 and check for overflow. We don't
4099 check relocation index for overflow since branch displacement
4100 will overflow first. */
4101 if (plt0_offset > 0x80000000)
4102 /* xgettext:c-format */
4103 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4104 output_bfd, h->root.root.string);
4105 bfd_put_32 (output_bfd, - plt0_offset,
4106 (plt->contents + h->plt.offset
4107 + htab->lazy_plt->plt_plt_offset));
4108 }
4109
4110 bed = get_elf_backend_data (output_bfd);
4111 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4112 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4113 }
4114 }
4115 else if (eh->plt_got.offset != (bfd_vma) -1)
4116 {
4117 bfd_vma got_offset, plt_offset;
4118 asection *plt, *got;
4119 bfd_boolean got_after_plt;
4120 int32_t got_pcrel_offset;
4121
4122 /* Set the entry in the GOT procedure linkage table. */
4123 plt = htab->plt_got;
4124 got = htab->elf.sgot;
4125 got_offset = h->got.offset;
4126
4127 if (got_offset == (bfd_vma) -1
4128 || (h->type == STT_GNU_IFUNC && h->def_regular)
4129 || plt == NULL
4130 || got == NULL)
4131 abort ();
4132
4133 /* Use the non-lazy PLT entry template for the GOT PLT since they
4134 	 are identical.  */
4135 /* Fill in the entry in the GOT procedure linkage table. */
4136 plt_offset = eh->plt_got.offset;
4137 memcpy (plt->contents + plt_offset,
4138 htab->non_lazy_plt->plt_entry,
4139 htab->non_lazy_plt->plt_entry_size);
4140
4141       /* Put in the offset for the PC-relative instruction referring to
4142 	 the GOT entry, subtracting the size of that instruction.  */
4143 got_pcrel_offset = (got->output_section->vma
4144 + got->output_offset
4145 + got_offset
4146 - plt->output_section->vma
4147 - plt->output_offset
4148 - plt_offset
4149 - htab->non_lazy_plt->plt_got_insn_size);
4150
4151 /* Check PC-relative offset overflow in GOT PLT entry. */
4152 got_after_plt = got->output_section->vma > plt->output_section->vma;
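      /* got_pcrel_offset is only 32 bits wide; if its sign disagrees with
	 the actual ordering of .got and .plt in memory, the true
	 displacement has wrapped around and cannot be represented.  */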
4153 if ((got_after_plt && got_pcrel_offset < 0)
4154 || (!got_after_plt && got_pcrel_offset > 0))
4155 /* xgettext:c-format */
4156 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4157 output_bfd, h->root.root.string);
4158
4159 bfd_put_32 (output_bfd, got_pcrel_offset,
4160 (plt->contents + plt_offset
4161 + htab->non_lazy_plt->plt_got_offset));
4162 }
4163
4164 if (!local_undefweak
4165 && !h->def_regular
4166 && (h->plt.offset != (bfd_vma) -1
4167 || eh->plt_got.offset != (bfd_vma) -1))
4168 {
4169 /* Mark the symbol as undefined, rather than as defined in
4170 the .plt section. Leave the value if there were any
4171 relocations where pointer equality matters (this is a clue
4172 for the dynamic linker, to make function pointer
4173 comparisons work between an application and shared
4174 library), otherwise set it to zero. If a function is only
4175 called from a binary, there is no need to slow down
4176 shared libraries because of that. */
4177 sym->st_shndx = SHN_UNDEF;
4178 if (!h->pointer_equality_needed)
4179 sym->st_value = 0;
4180 }
4181
4182 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4183
4184 /* Don't generate dynamic GOT relocation against undefined weak
4185 symbol in executable. */
4186 if (h->got.offset != (bfd_vma) -1
4187 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4188 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4189 && !local_undefweak)
4190 {
4191 Elf_Internal_Rela rela;
4192 asection *relgot = htab->elf.srelgot;
4193
4194 /* This symbol has an entry in the global offset table. Set it
4195 up. */
4196 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4197 abort ();
4198
4199 rela.r_offset = (htab->elf.sgot->output_section->vma
4200 + htab->elf.sgot->output_offset
4201 + (h->got.offset &~ (bfd_vma) 1));
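      /* The low bit of h->got.offset is used by relocate_section as an
	 "entry already initialized" marker, hence the &~ 1 above to recover
	 the real GOT offset.  */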
4202
4203 /* If this is a static link, or it is a -Bsymbolic link and the
4204 symbol is defined locally or was forced to be local because
4205 of a version file, we just want to emit a RELATIVE reloc.
4206 The entry in the global offset table will already have been
4207 initialized in the relocate_section function. */
4208 if (h->def_regular
4209 && h->type == STT_GNU_IFUNC)
4210 {
4211 if (h->plt.offset == (bfd_vma) -1)
4212 {
4213 /* STT_GNU_IFUNC is referenced without PLT. */
4214 if (htab->elf.splt == NULL)
4215 {
4216 	      /* Use the .rel[a].iplt section to store .got relocations
4217 		 in a static executable.  */
4218 relgot = htab->elf.irelplt;
4219 }
4220 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4221 {
4222 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4223 h->root.root.string,
4224 h->root.u.def.section->owner);
4225
4226 rela.r_info = htab->r_info (0,
4227 R_X86_64_IRELATIVE);
4228 rela.r_addend = (h->root.u.def.value
4229 + h->root.u.def.section->output_section->vma
4230 + h->root.u.def.section->output_offset);
4231 }
4232 else
4233 goto do_glob_dat;
4234 }
4235 else if (bfd_link_pic (info))
4236 {
4237 /* Generate R_X86_64_GLOB_DAT. */
4238 goto do_glob_dat;
4239 }
4240 else
4241 {
4242 asection *plt;
4243 bfd_vma plt_offset;
4244
4245 if (!h->pointer_equality_needed)
4246 abort ();
4247
4248 	  /* For a non-shared object, we can't use .got.plt, which
4249 	     contains the real function address, if we need pointer
4250 	     equality.  We load the GOT entry with the PLT entry instead.  */
4251 if (htab->plt_second != NULL)
4252 {
4253 plt = htab->plt_second;
4254 plt_offset = eh->plt_second.offset;
4255 }
4256 else
4257 {
4258 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4259 plt_offset = h->plt.offset;
4260 }
4261 bfd_put_64 (output_bfd, (plt->output_section->vma
4262 + plt->output_offset
4263 + plt_offset),
4264 htab->elf.sgot->contents + h->got.offset);
4265 return TRUE;
4266 }
4267 }
4268 else if (bfd_link_pic (info)
4269 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4270 {
4271 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4272 return FALSE;
4273 BFD_ASSERT((h->got.offset & 1) != 0);
4274 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4275 rela.r_addend = (h->root.u.def.value
4276 + h->root.u.def.section->output_section->vma
4277 + h->root.u.def.section->output_offset);
4278 }
4279 else
4280 {
4281 BFD_ASSERT((h->got.offset & 1) == 0);
4282 do_glob_dat:
4283 bfd_put_64 (output_bfd, (bfd_vma) 0,
4284 htab->elf.sgot->contents + h->got.offset);
4285 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4286 rela.r_addend = 0;
4287 }
4288
4289 elf_append_rela (output_bfd, relgot, &rela);
4290 }
4291
4292 if (h->needs_copy)
4293 {
4294 Elf_Internal_Rela rela;
4295 asection *s;
4296
4297 /* This symbol needs a copy reloc. Set it up. */
4298 VERIFY_COPY_RELOC (h, htab)
4299
4300 rela.r_offset = (h->root.u.def.value
4301 + h->root.u.def.section->output_section->vma
4302 + h->root.u.def.section->output_offset);
4303 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4304 rela.r_addend = 0;
4305 if (h->root.u.def.section == htab->elf.sdynrelro)
4306 s = htab->elf.sreldynrelro;
4307 else
4308 s = htab->elf.srelbss;
4309 elf_append_rela (output_bfd, s, &rela);
4310 }
4311
4312 return TRUE;
4313 }
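
/* For orientation, a sketch of the classic 64-bit lazy PLT entry whose
   fields elf_x86_64_finish_dynamic_symbol patches (offsets shown are those
   of the traditional non-IBT layout and are given here only as an example):

      ff 25 xx xx xx xx	 jmpq   *name@GOTPCREL(%rip)  # plt_got_offset = 2
      68 xx xx xx xx	 pushq  $relocation_index     # plt_reloc_offset = 7
      e9 xx xx xx xx	 jmpq   PLT0                  # plt_plt_offset = 12

   which is why the code above stores a GOT-relative displacement, a
   .rela.plt index and a backwards branch to PLT0.  */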
4314
4315 /* Finish up local dynamic symbol handling. We set the contents of
4316 various dynamic sections here. */
4317
4318 static bfd_boolean
4319 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4320 {
4321 struct elf_link_hash_entry *h
4322 = (struct elf_link_hash_entry *) *slot;
4323 struct bfd_link_info *info
4324 = (struct bfd_link_info *) inf;
4325
4326 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4327 info, h, NULL);
4328 }
4329
4330 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4331    here since an undefined weak symbol may not be dynamic, in which case
4332    elf_x86_64_finish_dynamic_symbol is not called for it.  */
4333
4334 static bfd_boolean
4335 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4336 void *inf)
4337 {
4338 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4339 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4340
4341 if (h->root.type != bfd_link_hash_undefweak
4342 || h->dynindx != -1)
4343 return TRUE;
4344
4345 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4346 info, h, NULL);
4347 }
4348
4349 /* Used to decide how to sort relocs in an optimal manner for the
4350 dynamic linker, before writing them out. */
4351
4352 static enum elf_reloc_type_class
4353 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4354 const asection *rel_sec ATTRIBUTE_UNUSED,
4355 const Elf_Internal_Rela *rela)
4356 {
4357 bfd *abfd = info->output_bfd;
4358 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4359 struct elf_x86_link_hash_table *htab
4360 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4361
4362 if (htab->elf.dynsym != NULL
4363 && htab->elf.dynsym->contents != NULL)
4364 {
4365 /* Check relocation against STT_GNU_IFUNC symbol if there are
4366 dynamic symbols. */
4367 unsigned long r_symndx = htab->r_sym (rela->r_info);
4368 if (r_symndx != STN_UNDEF)
4369 {
4370 Elf_Internal_Sym sym;
4371 if (!bed->s->swap_symbol_in (abfd,
4372 (htab->elf.dynsym->contents
4373 + r_symndx * bed->s->sizeof_sym),
4374 0, &sym))
4375 abort ();
4376
4377 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4378 return reloc_class_ifunc;
4379 }
4380 }
4381
4382 switch ((int) ELF32_R_TYPE (rela->r_info))
4383 {
4384 case R_X86_64_IRELATIVE:
4385 return reloc_class_ifunc;
4386 case R_X86_64_RELATIVE:
4387 case R_X86_64_RELATIVE64:
4388 return reloc_class_relative;
4389 case R_X86_64_JUMP_SLOT:
4390 return reloc_class_plt;
4391 case R_X86_64_COPY:
4392 return reloc_class_copy;
4393 default:
4394 return reloc_class_normal;
4395 }
4396 }
4397
4398 /* Finish up the dynamic sections. */
4399
4400 static bfd_boolean
4401 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4402 struct bfd_link_info *info)
4403 {
4404 struct elf_x86_link_hash_table *htab;
4405
4406 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4407 if (htab == NULL)
4408 return FALSE;
4409
4410 if (! htab->elf.dynamic_sections_created)
4411 return TRUE;
4412
4413 if (htab->elf.splt && htab->elf.splt->size > 0)
4414 {
4415 elf_section_data (htab->elf.splt->output_section)
4416 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4417
4418 if (htab->plt.has_plt0)
4419 {
4420 /* Fill in the special first entry in the procedure linkage
4421 table. */
4422 memcpy (htab->elf.splt->contents,
4423 htab->lazy_plt->plt0_entry,
4424 htab->lazy_plt->plt0_entry_size);
4425 /* Add offset for pushq GOT+8(%rip), since the instruction
4426 	     uses 6 bytes, subtract this value.  */
4427 bfd_put_32 (output_bfd,
4428 (htab->elf.sgotplt->output_section->vma
4429 + htab->elf.sgotplt->output_offset
4430 + 8
4431 - htab->elf.splt->output_section->vma
4432 - htab->elf.splt->output_offset
4433 - 6),
4434 (htab->elf.splt->contents
4435 + htab->lazy_plt->plt0_got1_offset));
4436 /* Add offset for the PC-relative instruction accessing
4437 GOT+16, subtracting the offset to the end of that
4438 instruction. */
4439 bfd_put_32 (output_bfd,
4440 (htab->elf.sgotplt->output_section->vma
4441 + htab->elf.sgotplt->output_offset
4442 + 16
4443 - htab->elf.splt->output_section->vma
4444 - htab->elf.splt->output_offset
4445 - htab->lazy_plt->plt0_got2_insn_end),
4446 (htab->elf.splt->contents
4447 + htab->lazy_plt->plt0_got2_offset));
4448 }
4449
4450 if (htab->tlsdesc_plt)
4451 {
4452 bfd_put_64 (output_bfd, (bfd_vma) 0,
4453 htab->elf.sgot->contents + htab->tlsdesc_got);
4454
4455 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4456 htab->lazy_plt->plt_tlsdesc_entry,
4457 htab->lazy_plt->plt_tlsdesc_entry_size);
4458
4459 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4460 bytes and the instruction uses 6 bytes, subtract these
4461 values. */
4462 bfd_put_32 (output_bfd,
4463 (htab->elf.sgotplt->output_section->vma
4464 + htab->elf.sgotplt->output_offset
4465 + 8
4466 - htab->elf.splt->output_section->vma
4467 - htab->elf.splt->output_offset
4468 - htab->tlsdesc_plt
4469 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4470 (htab->elf.splt->contents
4471 + htab->tlsdesc_plt
4472 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4473 /* Add offset for indirect branch via GOT+TDG, where TDG
4474 stands for htab->tlsdesc_got, subtracting the offset
4475 to the end of that instruction. */
4476 bfd_put_32 (output_bfd,
4477 (htab->elf.sgot->output_section->vma
4478 + htab->elf.sgot->output_offset
4479 + htab->tlsdesc_got
4480 - htab->elf.splt->output_section->vma
4481 - htab->elf.splt->output_offset
4482 - htab->tlsdesc_plt
4483 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4484 (htab->elf.splt->contents
4485 + htab->tlsdesc_plt
4486 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4487 }
4488 }
4489
4490 /* Fill PLT entries for undefined weak symbols in PIE. */
4491 if (bfd_link_pie (info))
4492 bfd_hash_traverse (&info->hash->table,
4493 elf_x86_64_pie_finish_undefweak_symbol,
4494 info);
4495
4496 return TRUE;
4497 }
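
/* Likewise, a sketch of the classic PLT0 whose two fields are filled in
   above (byte values are those of the traditional non-IBT layout, shown
   only as an example):

      ff 35 xx xx xx xx	 pushq  GOT+8(%rip)    # field at plt0_got1_offset
      ff 25 xx xx xx xx	 jmpq   *GOT+16(%rip)  # ends at plt0_got2_insn_end
      0f 1f 40 00	 nopl   0x0(%rax)

   GOT+8 and GOT+16 are the two .got.plt slots reserved for the dynamic
   linker, which is why 8 and 16 are added to the .got.plt address.  */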
4498
4499 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4500 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4501 It has to be done before elf_link_sort_relocs is called so that
4502 dynamic relocations are properly sorted. */
4503
4504 static bfd_boolean
4505 elf_x86_64_output_arch_local_syms
4506 (bfd *output_bfd ATTRIBUTE_UNUSED,
4507 struct bfd_link_info *info,
4508 void *flaginfo ATTRIBUTE_UNUSED,
4509 int (*func) (void *, const char *,
4510 Elf_Internal_Sym *,
4511 asection *,
4512 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4513 {
4514 struct elf_x86_link_hash_table *htab
4515 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4516 if (htab == NULL)
4517 return FALSE;
4518
4519 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4520 htab_traverse (htab->loc_hash_table,
4521 elf_x86_64_finish_local_dynamic_symbol,
4522 info);
4523
4524 return TRUE;
4525 }
4526
4527 /* Forward declaration. */
4528 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4529
4530 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4531 dynamic relocations. */
4532
4533 static long
4534 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4535 long symcount ATTRIBUTE_UNUSED,
4536 asymbol **syms ATTRIBUTE_UNUSED,
4537 long dynsymcount,
4538 asymbol **dynsyms,
4539 asymbol **ret)
4540 {
4541 long count, i, n;
4542 int j;
4543 bfd_byte *plt_contents;
4544 long relsize;
4545 const struct elf_x86_lazy_plt_layout *lazy_plt;
4546 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4547 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4548 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4549 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4550 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4551 asection *plt;
4552 enum elf_x86_plt_type plt_type;
4553 struct elf_x86_plt plts[] =
4554 {
4555 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4556 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4557 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4558 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4559 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4560 };
4561
4562 *ret = NULL;
4563
4564 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4565 return 0;
4566
4567 if (dynsymcount <= 0)
4568 return 0;
4569
4570 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4571 if (relsize <= 0)
4572 return -1;
4573
4574 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4575 {
4576 lazy_plt = &elf_x86_64_lazy_plt;
4577 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4578 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4579 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4580 if (ABI_64_P (abfd))
4581 {
4582 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4583 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4584 }
4585 else
4586 {
4587 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4588 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4589 }
4590 }
4591 else
4592 {
4593 lazy_plt = &elf_x86_64_nacl_plt;
4594 non_lazy_plt = NULL;
4595 lazy_bnd_plt = NULL;
4596 non_lazy_bnd_plt = NULL;
4597 lazy_ibt_plt = NULL;
4598 non_lazy_ibt_plt = NULL;
4599 }
4600
4601 count = 0;
4602 for (j = 0; plts[j].name != NULL; j++)
4603 {
4604 plt = bfd_get_section_by_name (abfd, plts[j].name);
4605 if (plt == NULL || plt->size == 0)
4606 continue;
4607
4608 /* Get the PLT section contents. */
4609 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4610 if (plt_contents == NULL)
4611 break;
4612 if (!bfd_get_section_contents (abfd, (asection *) plt,
4613 plt_contents, 0, plt->size))
4614 {
4615 free (plt_contents);
4616 break;
4617 }
4618
4619 /* Check what kind of PLT it is. */
4620 plt_type = plt_unknown;
4621 if (plts[j].type == plt_unknown
4622 && (plt->size >= (lazy_plt->plt_entry_size
4623 + lazy_plt->plt_entry_size)))
4624 {
4625 /* Match lazy PLT first. Need to check the first two
4626 instructions. */
4627 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4628 lazy_plt->plt0_got1_offset) == 0)
4629 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4630 2) == 0))
4631 plt_type = plt_lazy;
4632 else if (lazy_bnd_plt != NULL
4633 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4634 lazy_bnd_plt->plt0_got1_offset) == 0)
4635 && (memcmp (plt_contents + 6,
4636 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4637 {
4638 plt_type = plt_lazy | plt_second;
4639 	      /* The first entry in the lazy IBT PLT is the same as the
4640 lazy BND PLT. */
4641 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4642 lazy_ibt_plt->plt_entry,
4643 lazy_ibt_plt->plt_got_offset) == 0))
4644 lazy_plt = lazy_ibt_plt;
4645 else
4646 lazy_plt = lazy_bnd_plt;
4647 }
4648 }
4649
4650 if (non_lazy_plt != NULL
4651 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4652 && plt->size >= non_lazy_plt->plt_entry_size)
4653 {
4654 /* Match non-lazy PLT. */
4655 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4656 non_lazy_plt->plt_got_offset) == 0)
4657 plt_type = plt_non_lazy;
4658 }
4659
4660 if (plt_type == plt_unknown || plt_type == plt_second)
4661 {
4662 if (non_lazy_bnd_plt != NULL
4663 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4664 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4665 non_lazy_bnd_plt->plt_got_offset) == 0))
4666 {
4667 /* Match BND PLT. */
4668 plt_type = plt_second;
4669 non_lazy_plt = non_lazy_bnd_plt;
4670 }
4671 else if (non_lazy_ibt_plt != NULL
4672 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4673 && (memcmp (plt_contents,
4674 non_lazy_ibt_plt->plt_entry,
4675 non_lazy_ibt_plt->plt_got_offset) == 0))
4676 {
4677 /* Match IBT PLT. */
4678 plt_type = plt_second;
4679 non_lazy_plt = non_lazy_ibt_plt;
4680 }
4681 }
4682
4683 if (plt_type == plt_unknown)
4684 {
4685 free (plt_contents);
4686 continue;
4687 }
4688
4689 plts[j].sec = plt;
4690 plts[j].type = plt_type;
4691
4692 if ((plt_type & plt_lazy))
4693 {
4694 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4695 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4696 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4697 /* Skip PLT0 in lazy PLT. */
4698 i = 1;
4699 }
4700 else
4701 {
4702 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4703 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4704 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4705 i = 0;
4706 }
4707
4708 /* Skip lazy PLT when the second PLT is used. */
4709 if (plt_type == (plt_lazy | plt_second))
4710 plts[j].count = 0;
4711 else
4712 {
4713 n = plt->size / plts[j].plt_entry_size;
4714 plts[j].count = n;
4715 count += n - i;
4716 }
4717
4718 plts[j].contents = plt_contents;
4719 }
4720
4721 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4722 (bfd_vma) 0, plts, dynsyms,
4723 ret);
4724 }
4725
4726 /* Handle an x86-64 specific section when reading an object file. This
4727 is called when elfcode.h finds a section with an unknown type. */
4728
4729 static bfd_boolean
4730 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4731 const char *name, int shindex)
4732 {
4733 if (hdr->sh_type != SHT_X86_64_UNWIND)
4734 return FALSE;
4735
4736 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4737 return FALSE;
4738
4739 return TRUE;
4740 }
4741
4742 /* Hook called by the linker routine which adds symbols from an object
4743 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4744 of .bss. */
4745
4746 static bfd_boolean
4747 elf_x86_64_add_symbol_hook (bfd *abfd,
4748 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4749 Elf_Internal_Sym *sym,
4750 const char **namep ATTRIBUTE_UNUSED,
4751 flagword *flagsp ATTRIBUTE_UNUSED,
4752 asection **secp,
4753 bfd_vma *valp)
4754 {
4755 asection *lcomm;
4756
4757 switch (sym->st_shndx)
4758 {
4759 case SHN_X86_64_LCOMMON:
4760 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4761 if (lcomm == NULL)
4762 {
4763 lcomm = bfd_make_section_with_flags (abfd,
4764 "LARGE_COMMON",
4765 (SEC_ALLOC
4766 | SEC_IS_COMMON
4767 | SEC_LINKER_CREATED));
4768 if (lcomm == NULL)
4769 return FALSE;
4770 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4771 }
4772 *secp = lcomm;
4773 *valp = sym->st_size;
4774 return TRUE;
4775 }
4776
4777 return TRUE;
4778 }
4779
4780
4781 /* Given a BFD section, try to locate the corresponding ELF section
4782 index. */
4783
4784 static bfd_boolean
4785 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4786 asection *sec, int *index_return)
4787 {
4788 if (sec == &_bfd_elf_large_com_section)
4789 {
4790 *index_return = SHN_X86_64_LCOMMON;
4791 return TRUE;
4792 }
4793 return FALSE;
4794 }
4795
4796 /* Process a symbol. */
4797
4798 static void
4799 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4800 asymbol *asym)
4801 {
4802 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4803
4804 switch (elfsym->internal_elf_sym.st_shndx)
4805 {
4806 case SHN_X86_64_LCOMMON:
4807 asym->section = &_bfd_elf_large_com_section;
4808 asym->value = elfsym->internal_elf_sym.st_size;
4809 /* Common symbol doesn't set BSF_GLOBAL. */
4810 asym->flags &= ~BSF_GLOBAL;
4811 break;
4812 }
4813 }
4814
4815 static bfd_boolean
4816 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4817 {
4818 return (sym->st_shndx == SHN_COMMON
4819 || sym->st_shndx == SHN_X86_64_LCOMMON);
4820 }
4821
4822 static unsigned int
4823 elf_x86_64_common_section_index (asection *sec)
4824 {
4825 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4826 return SHN_COMMON;
4827 else
4828 return SHN_X86_64_LCOMMON;
4829 }
4830
4831 static asection *
4832 elf_x86_64_common_section (asection *sec)
4833 {
4834 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4835 return bfd_com_section_ptr;
4836 else
4837 return &_bfd_elf_large_com_section;
4838 }
4839
4840 static bfd_boolean
4841 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4842 const Elf_Internal_Sym *sym,
4843 asection **psec,
4844 bfd_boolean newdef,
4845 bfd_boolean olddef,
4846 bfd *oldbfd,
4847 const asection *oldsec)
4848 {
4849 /* A normal common symbol and a large common symbol result in a
4850 normal common symbol. We turn the large common symbol into a
4851 normal one. */
4852 if (!olddef
4853 && h->root.type == bfd_link_hash_common
4854 && !newdef
4855 && bfd_is_com_section (*psec)
4856 && oldsec != *psec)
4857 {
4858 if (sym->st_shndx == SHN_COMMON
4859 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4860 {
4861 h->root.u.c.p->section
4862 = bfd_make_section_old_way (oldbfd, "COMMON");
4863 h->root.u.c.p->section->flags = SEC_ALLOC;
4864 }
4865 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4866 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4867 *psec = bfd_com_section_ptr;
4868 }
4869
4870 return TRUE;
4871 }
4872
4873 static int
4874 elf_x86_64_additional_program_headers (bfd *abfd,
4875 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4876 {
4877 asection *s;
4878 int count = 0;
4879
4880 /* Check to see if we need a large readonly segment. */
4881 s = bfd_get_section_by_name (abfd, ".lrodata");
4882 if (s && (s->flags & SEC_LOAD))
4883 count++;
4884
4885   /* Check to see if we need a large data segment.  Since the .lbss section
4886 is placed right after the .bss section, there should be no need for
4887 a large data segment just because of .lbss. */
4888 s = bfd_get_section_by_name (abfd, ".ldata");
4889 if (s && (s->flags & SEC_LOAD))
4890 count++;
4891
4892 return count;
4893 }
4894
4895 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4896
4897 static bfd_boolean
4898 elf_x86_64_relocs_compatible (const bfd_target *input,
4899 const bfd_target *output)
4900 {
4901 return ((xvec_get_elf_backend_data (input)->s->elfclass
4902 == xvec_get_elf_backend_data (output)->s->elfclass)
4903 && _bfd_elf_relocs_compatible (input, output));
4904 }
4905
4906 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4907 with GNU properties if found. Otherwise, return NULL. */
4908
4909 static bfd *
4910 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4911 {
4912 struct elf_x86_init_table init_table;
4913
4914 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4915 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4916 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4917 != (int) R_X86_64_GNU_VTINHERIT)
4918 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4919 != (int) R_X86_64_GNU_VTENTRY))
4920 abort ();
4921
4922 /* This is unused for x86-64. */
4923 init_table.plt0_pad_byte = 0x90;
4924
4925 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4926 {
4927 if (info->bndplt)
4928 {
4929 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4930 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4931 }
4932 else
4933 {
4934 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4935 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4936 }
4937
4938 if (ABI_64_P (info->output_bfd))
4939 {
4940 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4941 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4942 }
4943 else
4944 {
4945 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4946 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4947 }
4948 }
4949 else
4950 {
4951 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4952 init_table.non_lazy_plt = NULL;
4953 init_table.lazy_ibt_plt = NULL;
4954 init_table.non_lazy_ibt_plt = NULL;
4955 }
4956
4957 if (ABI_64_P (info->output_bfd))
4958 {
4959 init_table.r_info = elf64_r_info;
4960 init_table.r_sym = elf64_r_sym;
4961 }
4962 else
4963 {
4964 init_table.r_info = elf32_r_info;
4965 init_table.r_sym = elf32_r_sym;
4966 }
4967
4968 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4969 }
4970
4971 static const struct bfd_elf_special_section
4972 elf_x86_64_special_sections[]=
4973 {
4974 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4975 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4976 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4977 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4978 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4979 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4980 { NULL, 0, 0, 0, 0 }
4981 };
4982
4983 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4984 #define TARGET_LITTLE_NAME "elf64-x86-64"
4985 #define ELF_ARCH bfd_arch_i386
4986 #define ELF_TARGET_ID X86_64_ELF_DATA
4987 #define ELF_MACHINE_CODE EM_X86_64
4988 #if DEFAULT_LD_Z_SEPARATE_CODE
4989 # define ELF_MAXPAGESIZE 0x1000
4990 #else
4991 # define ELF_MAXPAGESIZE 0x200000
4992 #endif
4993 #define ELF_MINPAGESIZE 0x1000
4994 #define ELF_COMMONPAGESIZE 0x1000
4995
4996 #define elf_backend_can_gc_sections 1
4997 #define elf_backend_can_refcount 1
4998 #define elf_backend_want_got_plt 1
4999 #define elf_backend_plt_readonly 1
5000 #define elf_backend_want_plt_sym 0
5001 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5002 #define elf_backend_rela_normal 1
5003 #define elf_backend_plt_alignment 4
5004 #define elf_backend_extern_protected_data 1
5005 #define elf_backend_caches_rawsize 1
5006 #define elf_backend_dtrel_excludes_plt 1
5007 #define elf_backend_want_dynrelro 1
5008
5009 #define elf_info_to_howto elf_x86_64_info_to_howto
5010
5011 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5012 #define bfd_elf64_bfd_reloc_name_lookup \
5013 elf_x86_64_reloc_name_lookup
5014
5015 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5016 #define elf_backend_check_relocs elf_x86_64_check_relocs
5017 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5018 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5019 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5020 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5021 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5022 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5023 #ifdef CORE_HEADER
5024 #define elf_backend_write_core_note elf_x86_64_write_core_note
5025 #endif
5026 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5027 #define elf_backend_relocate_section elf_x86_64_relocate_section
5028 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5029 #define elf_backend_object_p elf64_x86_64_elf_object_p
5030 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5031
5032 #define elf_backend_section_from_shdr \
5033 elf_x86_64_section_from_shdr
5034
5035 #define elf_backend_section_from_bfd_section \
5036 elf_x86_64_elf_section_from_bfd_section
5037 #define elf_backend_add_symbol_hook \
5038 elf_x86_64_add_symbol_hook
5039 #define elf_backend_symbol_processing \
5040 elf_x86_64_symbol_processing
5041 #define elf_backend_common_section_index \
5042 elf_x86_64_common_section_index
5043 #define elf_backend_common_section \
5044 elf_x86_64_common_section
5045 #define elf_backend_common_definition \
5046 elf_x86_64_common_definition
5047 #define elf_backend_merge_symbol \
5048 elf_x86_64_merge_symbol
5049 #define elf_backend_special_sections \
5050 elf_x86_64_special_sections
5051 #define elf_backend_additional_program_headers \
5052 elf_x86_64_additional_program_headers
5053 #define elf_backend_setup_gnu_properties \
5054 elf_x86_64_link_setup_gnu_properties
5055 #define elf_backend_hide_symbol \
5056 _bfd_x86_elf_hide_symbol
5057
5058 #undef elf64_bed
5059 #define elf64_bed elf64_x86_64_bed
5060
5061 #include "elf64-target.h"
5062
5063 /* CloudABI support. */
5064
5065 #undef TARGET_LITTLE_SYM
5066 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5067 #undef TARGET_LITTLE_NAME
5068 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5069
5070 #undef ELF_OSABI
5071 #define ELF_OSABI ELFOSABI_CLOUDABI
5072
5073 #undef elf64_bed
5074 #define elf64_bed elf64_x86_64_cloudabi_bed
5075
5076 #include "elf64-target.h"
5077
5078 /* FreeBSD support. */
5079
5080 #undef TARGET_LITTLE_SYM
5081 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5082 #undef TARGET_LITTLE_NAME
5083 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5084
5085 #undef ELF_OSABI
5086 #define ELF_OSABI ELFOSABI_FREEBSD
5087
5088 #undef elf64_bed
5089 #define elf64_bed elf64_x86_64_fbsd_bed
5090
5091 #include "elf64-target.h"
5092
5093 /* Solaris 2 support. */
5094
5095 #undef TARGET_LITTLE_SYM
5096 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5097 #undef TARGET_LITTLE_NAME
5098 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5099
5100 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5101 {
5102 is_solaris /* os */
5103 };
5104
5105 #undef elf_backend_arch_data
5106 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5107
5108 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5109 objects won't be recognized. */
5110 #undef ELF_OSABI
5111
5112 #undef elf64_bed
5113 #define elf64_bed elf64_x86_64_sol2_bed
5114
5115 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5116 boundary. */
5117 #undef elf_backend_static_tls_alignment
5118 #define elf_backend_static_tls_alignment 16
5119
5120 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5121
5122 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5123 File, p.63. */
5124 #undef elf_backend_want_plt_sym
5125 #define elf_backend_want_plt_sym 1
5126
5127 #undef elf_backend_strtab_flags
5128 #define elf_backend_strtab_flags SHF_STRINGS
5129
5130 static bfd_boolean
5131 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5132 bfd *obfd ATTRIBUTE_UNUSED,
5133 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5134 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5135 {
5136 /* PR 19938: FIXME: Need to add code for setting the sh_info
5137 and sh_link fields of Solaris specific section types. */
5138 return FALSE;
5139 }
5140
5141 #undef elf_backend_copy_special_section_fields
5142 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5143
5144 #include "elf64-target.h"
5145
5146 /* Native Client support. */
5147
5148 static bfd_boolean
5149 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5150 {
5151 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5152 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5153 return TRUE;
5154 }
5155
5156 #undef TARGET_LITTLE_SYM
5157 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5158 #undef TARGET_LITTLE_NAME
5159 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5160 #undef elf64_bed
5161 #define elf64_bed elf64_x86_64_nacl_bed
5162
5163 #undef ELF_MAXPAGESIZE
5164 #undef ELF_MINPAGESIZE
5165 #undef ELF_COMMONPAGESIZE
5166 #define ELF_MAXPAGESIZE 0x10000
5167 #define ELF_MINPAGESIZE 0x10000
5168 #define ELF_COMMONPAGESIZE 0x10000
5169
5170 /* Restore defaults. */
5171 #undef ELF_OSABI
5172 #undef elf_backend_static_tls_alignment
5173 #undef elf_backend_want_plt_sym
5174 #define elf_backend_want_plt_sym 0
5175 #undef elf_backend_strtab_flags
5176 #undef elf_backend_copy_special_section_fields
5177
5178 /* NaCl uses substantially different PLT entries for the same effects. */
5179
5180 #undef elf_backend_plt_alignment
5181 #define elf_backend_plt_alignment 5
5182 #define NACL_PLT_ENTRY_SIZE 64
5183 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5184
5185 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5186 {
5187 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5188 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5189 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5190 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5191 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5192
5193 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5194 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5195
5196 /* 32 bytes of nop to pad out to the standard size. */
5197 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5198 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5199 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5200 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5201 0x66, /* excess data16 prefix */
5202 0x90 /* nop */
5203 };
5204
5205 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5206 {
5207 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5208 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5209 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5210 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5211
5212 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5213 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5214 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5215
5216 /* Lazy GOT entries point here (32-byte aligned). */
5217 0x68, /* pushq immediate */
5218 0, 0, 0, 0, /* replaced with index into relocation table. */
5219 0xe9, /* jmp relative */
5220 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5221
5222 /* 22 bytes of nop to pad out to the standard size. */
5223 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5224 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5225 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5226 };
5227
5228 /* .eh_frame covering the .plt section. */
5229
5230 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5231 {
5232 #if (PLT_CIE_LENGTH != 20 \
5233 || PLT_FDE_LENGTH != 36 \
5234 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5235 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5236 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5237 #endif
5238 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5239 0, 0, 0, 0, /* CIE ID */
5240 1, /* CIE version */
5241 'z', 'R', 0, /* Augmentation string */
5242 1, /* Code alignment factor */
5243 0x78, /* Data alignment factor */
5244 16, /* Return address column */
5245 1, /* Augmentation size */
5246 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5247 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5248 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5249 DW_CFA_nop, DW_CFA_nop,
5250
5251 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5252 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5253 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5254 0, 0, 0, 0, /* .plt size goes here */
5255 0, /* Augmentation size */
5256 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5257 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5258 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5259 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5260 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5261 13, /* Block length */
5262 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5263 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5264 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5265 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5266 DW_CFA_nop, DW_CFA_nop
5267 };
5268
5269 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5270 {
5271 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5272 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5273 elf_x86_64_nacl_plt_entry, /* plt_entry */
5274 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5275 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5276 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5277 2, /* plt_tlsdesc_got1_offset */
5278 9, /* plt_tlsdesc_got2_offset */
5279 6, /* plt_tlsdesc_got1_insn_end */
5280 13, /* plt_tlsdesc_got2_insn_end */
5281 2, /* plt0_got1_offset */
5282 9, /* plt0_got2_offset */
5283 13, /* plt0_got2_insn_end */
5284 3, /* plt_got_offset */
5285 33, /* plt_reloc_offset */
5286 38, /* plt_plt_offset */
5287 7, /* plt_got_insn_size */
5288 42, /* plt_plt_insn_end */
5289 32, /* plt_lazy_offset */
5290 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5291 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5292 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5293 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5294 };
5295
5296 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5297 {
5298 is_nacl /* os */
5299 };
5300
5301 #undef elf_backend_arch_data
5302 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5303
5304 #undef elf_backend_object_p
5305 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5306 #undef elf_backend_modify_segment_map
5307 #define elf_backend_modify_segment_map nacl_modify_segment_map
5308 #undef elf_backend_modify_program_headers
5309 #define elf_backend_modify_program_headers nacl_modify_program_headers
5310 #undef elf_backend_final_write_processing
5311 #define elf_backend_final_write_processing nacl_final_write_processing
5312
5313 #include "elf64-target.h"
5314
5315 /* Native Client x32 support. */
5316
5317 static bfd_boolean
5318 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5319 {
5320 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5321 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5322 return TRUE;
5323 }
5324
5325 #undef TARGET_LITTLE_SYM
5326 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5327 #undef TARGET_LITTLE_NAME
5328 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5329 #undef elf32_bed
5330 #define elf32_bed elf32_x86_64_nacl_bed
5331
5332 #define bfd_elf32_bfd_reloc_type_lookup \
5333 elf_x86_64_reloc_type_lookup
5334 #define bfd_elf32_bfd_reloc_name_lookup \
5335 elf_x86_64_reloc_name_lookup
5336 #define bfd_elf32_get_synthetic_symtab \
5337 elf_x86_64_get_synthetic_symtab
5338
5339 #undef elf_backend_object_p
5340 #define elf_backend_object_p \
5341 elf32_x86_64_nacl_elf_object_p
5342
5343 #undef elf_backend_bfd_from_remote_memory
5344 #define elf_backend_bfd_from_remote_memory \
5345 _bfd_elf32_bfd_from_remote_memory
5346
5347 #undef elf_backend_size_info
5348 #define elf_backend_size_info \
5349 _bfd_elf32_size_info
5350
5351 #undef elf32_bed
5352 #define elf32_bed elf32_x86_64_bed
5353
5354 #include "elf32-target.h"
5355
5356 /* Restore defaults. */
5357 #undef elf_backend_object_p
5358 #define elf_backend_object_p elf64_x86_64_elf_object_p
5359 #undef elf_backend_bfd_from_remote_memory
5360 #undef elf_backend_size_info
5361 #undef elf_backend_modify_segment_map
5362 #undef elf_backend_modify_program_headers
5363 #undef elf_backend_final_write_processing
5364
5365 /* Intel L1OM support. */
5366
5367 static bfd_boolean
5368 elf64_l1om_elf_object_p (bfd *abfd)
5369 {
5370 /* Set the right machine number for an L1OM elf64 file. */
5371 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5372 return TRUE;
5373 }
5374
5375 #undef TARGET_LITTLE_SYM
5376 #define TARGET_LITTLE_SYM l1om_elf64_vec
5377 #undef TARGET_LITTLE_NAME
5378 #define TARGET_LITTLE_NAME "elf64-l1om"
5379 #undef ELF_ARCH
5380 #define ELF_ARCH bfd_arch_l1om
5381
5382 #undef ELF_MACHINE_CODE
5383 #define ELF_MACHINE_CODE EM_L1OM
5384
5385 #undef ELF_OSABI
5386
5387 #undef elf64_bed
5388 #define elf64_bed elf64_l1om_bed
5389
5390 #undef elf_backend_object_p
5391 #define elf_backend_object_p elf64_l1om_elf_object_p
5392
5393 /* Restore defaults. */
5394 #undef ELF_MAXPAGESIZE
5395 #undef ELF_MINPAGESIZE
5396 #undef ELF_COMMONPAGESIZE
5397 #if DEFAULT_LD_Z_SEPARATE_CODE
5398 # define ELF_MAXPAGESIZE 0x1000
5399 #else
5400 # define ELF_MAXPAGESIZE 0x200000
5401 #endif
5402 #define ELF_MINPAGESIZE 0x1000
5403 #define ELF_COMMONPAGESIZE 0x1000
5404 #undef elf_backend_plt_alignment
5405 #define elf_backend_plt_alignment 4
5406 #undef elf_backend_arch_data
5407 #define elf_backend_arch_data &elf_x86_64_arch_bed
5408
5409 #include "elf64-target.h"
5410
5411 /* FreeBSD L1OM support. */
5412
5413 #undef TARGET_LITTLE_SYM
5414 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5415 #undef TARGET_LITTLE_NAME
5416 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5417
5418 #undef ELF_OSABI
5419 #define ELF_OSABI ELFOSABI_FREEBSD
5420
5421 #undef elf64_bed
5422 #define elf64_bed elf64_l1om_fbsd_bed
5423
5424 #include "elf64-target.h"
5425
5426 /* Intel K1OM support. */
5427
5428 static bfd_boolean
5429 elf64_k1om_elf_object_p (bfd *abfd)
5430 {
5431   /* Set the right machine number for a K1OM elf64 file.  */
5432 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5433 return TRUE;
5434 }
5435
5436 #undef TARGET_LITTLE_SYM
5437 #define TARGET_LITTLE_SYM k1om_elf64_vec
5438 #undef TARGET_LITTLE_NAME
5439 #define TARGET_LITTLE_NAME "elf64-k1om"
5440 #undef ELF_ARCH
5441 #define ELF_ARCH bfd_arch_k1om
5442
5443 #undef ELF_MACHINE_CODE
5444 #define ELF_MACHINE_CODE EM_K1OM
5445
5446 #undef ELF_OSABI
5447
5448 #undef elf64_bed
5449 #define elf64_bed elf64_k1om_bed
5450
5451 #undef elf_backend_object_p
5452 #define elf_backend_object_p elf64_k1om_elf_object_p
5453
5454 #undef elf_backend_static_tls_alignment
5455
5456 #undef elf_backend_want_plt_sym
5457 #define elf_backend_want_plt_sym 0
5458
5459 #include "elf64-target.h"
5460
5461 /* FreeBSD K1OM support. */
5462
5463 #undef TARGET_LITTLE_SYM
5464 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5465 #undef TARGET_LITTLE_NAME
5466 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5467
5468 #undef ELF_OSABI
5469 #define ELF_OSABI ELFOSABI_FREEBSD
5470
5471 #undef elf64_bed
5472 #define elf64_bed elf64_k1om_fbsd_bed
5473
5474 #include "elf64-target.h"
5475
5476 /* 32bit x86-64 support. */
5477
5478 #undef TARGET_LITTLE_SYM
5479 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5480 #undef TARGET_LITTLE_NAME
5481 #define TARGET_LITTLE_NAME "elf32-x86-64"
5482 #undef elf32_bed
5483
5484 #undef ELF_ARCH
5485 #define ELF_ARCH bfd_arch_i386
5486
5487 #undef ELF_MACHINE_CODE
5488 #define ELF_MACHINE_CODE EM_X86_64
5489
5490 #undef ELF_OSABI
5491
5492 #undef elf_backend_object_p
5493 #define elf_backend_object_p \
5494 elf32_x86_64_elf_object_p
5495
5496 #undef elf_backend_bfd_from_remote_memory
5497 #define elf_backend_bfd_from_remote_memory \
5498 _bfd_elf32_bfd_from_remote_memory
5499
5500 #undef elf_backend_size_info
5501 #define elf_backend_size_info \
5502 _bfd_elf32_size_info
5503
5504 #include "elf32-target.h"