x86: Call rtype_to_howto to get reloc_howto_type pointer
bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode relocation types in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
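/* All relocation types that elf_x86_64_convert_load_reloc can produce
   (R_X86_64_PC32, R_X86_64_32 and R_X86_64_32S) are well below 0x80, so
   bit 7 of the 8-bit ELF32_R_TYPE field is free to mark a relocation that
   has already been converted; the bit is masked off again wherever the
   type is examined (see e.g. elf_x86_64_check_tls_transition).  */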
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
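/* Map an x86-64 ELF relocation number onto its entry in the howto table
   above.  Standard relocs index the table directly; the GNU vtable relocs
   are stored right after the R_X86_64_standard entries, so
   R_X86_64_vt_offset is subtracted from their type; for x32,
   R_X86_64_32 is redirected to the final table entry, which uses
   complain_overflow_bitfield.  */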
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args by some implementations (at least
428 one, anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
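/* On the first call through a lazy PLT entry, the indirect jmp lands back
   in the entry itself (the GOT slot initially points at the following
   pushq), the relocation index is pushed, and control jumps to PLT0, which
   pushes GOT+8 (the link map) and jumps through GOT+16 to the dynamic
   linker's resolver; the resolver then patches the GOT slot so later
   calls go straight to the target.  */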
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
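/* Non-lazy PLT entries are used when lazy binding is not needed (for
   example with -z now): ld.so resolves the GOT slot at load time, so each
   entry is a single indirect jump padded to NON_LAZY_PLT_ENTRY_SIZE.  */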
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
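/* Roughly speaking, the DW_CFA_def_cfa_expression above recovers the CFA
   inside a 16-byte PLT entry: it takes %rsp + 8 and, once %rip modulo 16
   has reached the end of the pushq (>= 11 for the plain lazy PLT), adds
   another 8 for the relocation index already on the stack.  The BND and
   IBT variants below differ only in that threshold.  */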
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transition to a different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transition to a different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq %r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq %rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transition to a different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq %r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq %rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
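/* Summary of the transitions below: when building an executable, GD,
   GDesc and IE relocations against a local symbol go straight to LE
   (R_X86_64_TPOFF32); against a global symbol they first go to IE
   (R_X86_64_GOTTPOFF) and may be relaxed further to LE from
   elf_x86_64_relocate_section.  LD (R_X86_64_TLSLD) goes to LE in an
   executable.  Nothing changes for shared objects or for function
   symbols.  */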
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = _("; recompile with -fPIC");
1426 break;
1427 }
1428
1429 if (!h->def_regular && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = _("; recompile with -fPIC");
1436 }
1437
1438 if (bfd_link_dll (info))
1439 object = _("a shared object");
1440 else if (bfd_link_pie (info))
1441 object = _("a PIE object");
1442 else
1443 object = _("a PDE object");
1444
1445 /* xgettext:c-format */
1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1447 "not be used when making %s%s"),
1448 input_bfd, howto->name, und, v, name,
1449 object, pic);
1450 bfd_set_error (bfd_error_bad_value);
1451 sec->check_relocs_failed = 1;
1452 return FALSE;
1453 }
1454
1455 /* With the local symbol, foo, we convert
1456 mov foo@GOTPCREL(%rip), %reg
1457 to
1458 lea foo(%rip), %reg
1459 and convert
1460 call/jmp *foo@GOTPCREL(%rip)
1461 to
1462 nop call foo (or jmp foo nop)
1463 When PIC is false, convert
1464 test %reg, foo@GOTPCREL(%rip)
1465 to
1466 test $foo, %reg
1467 and convert
1468 binop foo@GOTPCREL(%rip), %reg
1469 to
1470 binop $foo, %reg
1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1472 instructions. */
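/* As one illustrative example of the rewrite (the displacement field and
   the relocation offset are unchanged):
     48 8b 05 <disp32>   mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <disp32>   lea foo(%rip), %rax
   with the relocation retyped from R_X86_64_REX_GOTPCRELX to
   R_X86_64_PC32.  */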
1473
1474 static bfd_boolean
1475 elf_x86_64_convert_load_reloc (bfd *abfd,
1476 bfd_byte *contents,
1477 unsigned int *r_type_p,
1478 Elf_Internal_Rela *irel,
1479 struct elf_link_hash_entry *h,
1480 bfd_boolean *converted,
1481 struct bfd_link_info *link_info)
1482 {
1483 struct elf_x86_link_hash_table *htab;
1484 bfd_boolean is_pic;
1485 bfd_boolean no_overflow;
1486 bfd_boolean relocx;
1487 bfd_boolean to_reloc_pc32;
1488 asection *tsec;
1489 bfd_signed_vma raddend;
1490 unsigned int opcode;
1491 unsigned int modrm;
1492 unsigned int r_type = *r_type_p;
1493 unsigned int r_symndx;
1494 bfd_vma roff = irel->r_offset;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return TRUE;
1498
1499 raddend = irel->r_addend;
1500 /* Addend for 32-bit PC-relative relocation must be -4. */
1501 if (raddend != -4)
1502 return TRUE;
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517 /* The mov -> lea conversion is allowed even for plain GOTPCREL since it has long been supported. */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return TRUE;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL, since we can't modify the REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 /* Get the symbol referred to by the reloc. */
1539 if (h == NULL)
1540 {
1541 Elf_Internal_Sym *isym
1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1543
1544 /* Skip relocation against undefined symbols. */
1545 if (isym->st_shndx == SHN_UNDEF)
1546 return TRUE;
1547
1548 if (isym->st_shndx == SHN_ABS)
1549 tsec = bfd_abs_section_ptr;
1550 else if (isym->st_shndx == SHN_COMMON)
1551 tsec = bfd_com_section_ptr;
1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1553 tsec = &_bfd_elf_large_com_section;
1554 else
1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1556 }
1557 else
1558 {
1559 /* An undefined weak symbol is only bound locally in an executable
1560 and its reference is resolved as 0 without relocation
1561 overflow. We can only perform this optimization for
1562 GOTPCRELX relocations since we need to modify the REX byte.
1563 It is OK to convert mov with R_X86_64_GOTPCREL to
1564 R_X86_64_PC32. */
1565 bfd_boolean local_ref;
1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1567
1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1570 if ((relocx || opcode == 0x8b)
1571 && (h->root.type == bfd_link_hash_undefweak
1572 && !eh->linker_def
1573 && local_ref))
1574 {
1575 if (opcode == 0xff)
1576 {
1577 /* Skip for branch instructions since R_X86_64_PC32
1578 may overflow. */
1579 if (no_overflow)
1580 return TRUE;
1581 }
1582 else if (relocx)
1583 {
1584 /* For non-branch instructions, we can convert to
1585 R_X86_64_32/R_X86_64_32S since we know if there
1586 is a REX byte. */
1587 to_reloc_pc32 = FALSE;
1588 }
1589
1590 /* Since we don't know the current PC when PIC is true,
1591 we can't convert to R_X86_64_PC32. */
1592 if (to_reloc_pc32 && is_pic)
1593 return TRUE;
1594
1595 goto convert;
1596 }
1597 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1598 ld.so may use its link-time address. */
1599 else if (h->start_stop
1600 || eh->linker_def
1601 || ((h->def_regular
1602 || h->root.type == bfd_link_hash_defined
1603 || h->root.type == bfd_link_hash_defweak)
1604 && h != htab->elf.hdynamic
1605 && local_ref))
1606 {
1607 /* bfd_link_hash_new or bfd_link_hash_undefined is
1608 set by an assignment in a linker script in
1609 bfd_elf_record_link_assignment. start_stop is set
1610 on __start_SECNAME/__stop_SECNAME which mark section
1611 SECNAME. */
1612 if (h->start_stop
1613 || eh->linker_def
1614 || (h->def_regular
1615 && (h->root.type == bfd_link_hash_new
1616 || h->root.type == bfd_link_hash_undefined
1617 || ((h->root.type == bfd_link_hash_defined
1618 || h->root.type == bfd_link_hash_defweak)
1619 && h->root.u.def.section == bfd_und_section_ptr))))
1620 {
1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1622 if (no_overflow)
1623 return TRUE;
1624 goto convert;
1625 }
1626 tsec = h->root.u.def.section;
1627 }
1628 else
1629 return TRUE;
1630 }
1631
1632 /* Don't convert GOTPCREL relocation against large section. */
1633 if (elf_section_data (tsec) != NULL
1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1635 return TRUE;
1636
1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1638 if (no_overflow)
1639 return TRUE;
1640
1641 convert:
1642 if (opcode == 0xff)
1643 {
1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1645 unsigned int nop;
1646 unsigned int disp;
1647 bfd_vma nop_offset;
1648
1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1650 R_X86_64_PC32. */
1651 modrm = bfd_get_8 (abfd, contents + roff - 1);
1652 if (modrm == 0x25)
1653 {
1654 /* Convert to "jmp foo nop". */
1655 modrm = 0xe9;
1656 nop = NOP_OPCODE;
1657 nop_offset = irel->r_offset + 3;
1658 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1659 irel->r_offset -= 1;
1660 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1661 }
1662 else
1663 {
1664 struct elf_x86_link_hash_entry *eh
1665 = (struct elf_x86_link_hash_entry *) h;
1666
1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1668 is a nop prefix. */
1669 modrm = 0xe8;
1670 /* To support TLS optimization, always use addr32 prefix for
1671 "call *__tls_get_addr@GOTPCREL(%rip)". */
1672 if (eh && eh->tls_get_addr)
1673 {
1674 nop = 0x67;
1675 nop_offset = irel->r_offset - 2;
1676 }
1677 else
1678 {
1679 nop = link_info->call_nop_byte;
1680 if (link_info->call_nop_as_suffix)
1681 {
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 nop_offset = irel->r_offset - 2;
1689 }
1690 }
1691 bfd_put_8 (abfd, nop, contents + nop_offset);
1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1693 r_type = R_X86_64_PC32;
1694 }
1695 else
1696 {
1697 unsigned int rex;
1698 unsigned int rex_mask = REX_R;
1699
1700 if (r_type == R_X86_64_REX_GOTPCRELX)
1701 rex = bfd_get_8 (abfd, contents + roff - 3);
1702 else
1703 rex = 0;
1704
1705 if (opcode == 0x8b)
1706 {
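/* For example (standard encodings, for illustration only):
   "mov foo@GOTPCREL(%rip), %rax" is 48 8b 05 <disp32>.  Changing the
   opcode byte 8b to 8d gives "lea foo(%rip), %rax"
   (48 8d 05 <disp32>), while changing it to c7 with ModRM 0xc0|reg
   gives "mov $foo, %rax" (48 c7 c0 <imm32>).  */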
1707 if (to_reloc_pc32)
1708 {
1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1710 "lea foo(%rip), %reg". */
1711 opcode = 0x8d;
1712 r_type = R_X86_64_PC32;
1713 }
1714 else
1715 {
1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1717 "mov $foo, %reg". */
1718 opcode = 0xc7;
1719 modrm = bfd_get_8 (abfd, contents + roff - 1);
1720 modrm = 0xc0 | (modrm & 0x38) >> 3;
1721 if ((rex & REX_W) != 0
1722 && ABI_64_P (link_info->output_bfd))
1723 {
1724 /* Keep the REX_W bit in REX byte for LP64. */
1725 r_type = R_X86_64_32S;
1726 goto rewrite_modrm_rex;
1727 }
1728 else
1729 {
1730 /* If the REX_W bit in REX byte isn't needed,
1731 use R_X86_64_32 and clear the W bit to avoid
1732 sign-extending imm32 to imm64.  */
1733 r_type = R_X86_64_32;
1734 /* Clear the W bit in REX byte. */
1735 rex_mask |= REX_W;
1736 goto rewrite_modrm_rex;
1737 }
1738 }
1739 }
1740 else
1741 {
1742 /* R_X86_64_PC32 isn't supported. */
1743 if (to_reloc_pc32)
1744 return TRUE;
1745
1746 modrm = bfd_get_8 (abfd, contents + roff - 1);
1747 if (opcode == 0x85)
1748 {
1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1750 "test $foo, %reg". */
1751 modrm = 0xc0 | (modrm & 0x38) >> 3;
1752 opcode = 0xf7;
1753 }
1754 else
1755 {
1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1757 "binop $foo, %reg". */
1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1759 opcode = 0x81;
1760 }
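/* Worked example of the rewrites above (standard encodings):
   "test %rax, foo@GOTPCREL(%rip)" (48 85 05 <disp32>) becomes
   "test $foo, %rax" (48 f7 c0 <imm32>), and
   "add foo@GOTPCREL(%rip), %rax" (48 03 05 <disp32>) becomes
   "add $foo, %rax" (48 81 c0 <imm32>); (opcode & 0x3c) selects the
   matching /digit of the 0x81 opcode group.  */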
1761
1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1763 overflow when sign-extending imm32 to imm64. */
1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1765
1766 rewrite_modrm_rex:
1767 bfd_put_8 (abfd, modrm, contents + roff - 1);
1768
1769 if (rex)
1770 {
1771 /* Move the R bit to the B bit in REX byte. */
1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1773 bfd_put_8 (abfd, rex, contents + roff - 3);
1774 }
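/* E.g. "mov foo@GOTPCREL(%rip), %r8" carries REX 4c (REX.W|REX.R);
   after the register moves from the ModRM reg field to the rm field,
   the prefix becomes 49 (REX.W|REX.B).  */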
1775
1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1777 irel->r_addend = 0;
1778 }
1779
1780 bfd_put_8 (abfd, opcode, contents + roff - 2);
1781 }
1782
1783 *r_type_p = r_type;
1784 irel->r_info = htab->r_info (r_symndx,
1785 r_type | R_X86_64_converted_reloc_bit);
1786
1787 *converted = TRUE;
1788
1789 return TRUE;
1790 }
1791
1792 /* Look through the relocs for a section during the first phase, and
1793 calculate needed space in the global offset table, procedure
1794 linkage table, and dynamic reloc sections. */
1795
1796 static bfd_boolean
1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1798 asection *sec,
1799 const Elf_Internal_Rela *relocs)
1800 {
1801 struct elf_x86_link_hash_table *htab;
1802 Elf_Internal_Shdr *symtab_hdr;
1803 struct elf_link_hash_entry **sym_hashes;
1804 const Elf_Internal_Rela *rel;
1805 const Elf_Internal_Rela *rel_end;
1806 asection *sreloc;
1807 bfd_byte *contents;
1808 bfd_boolean converted;
1809
1810 if (bfd_link_relocatable (info))
1811 return TRUE;
1812
1813 /* Don't do anything special with non-loaded, non-alloced sections.
1814 In particular, any relocs in such sections should not affect GOT
1815 and PLT reference counting (ie. we don't allow them to create GOT
1816 or PLT entries), there's no possibility or desire to optimize TLS
1817 relocs, and there's not much point in propagating relocs to shared
1818 libs that the dynamic linker won't relocate. */
1819 if ((sec->flags & SEC_ALLOC) == 0)
1820 return TRUE;
1821
1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1823 if (htab == NULL)
1824 {
1825 sec->check_relocs_failed = 1;
1826 return FALSE;
1827 }
1828
1829 BFD_ASSERT (is_x86_elf (abfd, htab));
1830
1831 /* Get the section contents. */
1832 if (elf_section_data (sec)->this_hdr.contents != NULL)
1833 contents = elf_section_data (sec)->this_hdr.contents;
1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1835 {
1836 sec->check_relocs_failed = 1;
1837 return FALSE;
1838 }
1839
1840 symtab_hdr = &elf_symtab_hdr (abfd);
1841 sym_hashes = elf_sym_hashes (abfd);
1842
1843 converted = FALSE;
1844
1845 sreloc = NULL;
1846
1847 rel_end = relocs + sec->reloc_count;
1848 for (rel = relocs; rel < rel_end; rel++)
1849 {
1850 unsigned int r_type;
1851 unsigned int r_symndx;
1852 struct elf_link_hash_entry *h;
1853 struct elf_x86_link_hash_entry *eh;
1854 Elf_Internal_Sym *isym;
1855 const char *name;
1856 bfd_boolean size_reloc;
1857 bfd_boolean converted_reloc;
1858
1859 r_symndx = htab->r_sym (rel->r_info);
1860 r_type = ELF32_R_TYPE (rel->r_info);
1861
1862 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1863 {
1864 /* xgettext:c-format */
1865 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1866 abfd, r_symndx);
1867 goto error_return;
1868 }
1869
1870 if (r_symndx < symtab_hdr->sh_info)
1871 {
1872 /* A local symbol. */
1873 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1874 abfd, r_symndx);
1875 if (isym == NULL)
1876 goto error_return;
1877
1878 /* Check relocation against local STT_GNU_IFUNC symbol. */
1879 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1880 {
1881 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1882 TRUE);
1883 if (h == NULL)
1884 goto error_return;
1885
1886 /* Fake a STT_GNU_IFUNC symbol. */
1887 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1888 isym, NULL);
1889 h->type = STT_GNU_IFUNC;
1890 h->def_regular = 1;
1891 h->ref_regular = 1;
1892 h->forced_local = 1;
1893 h->root.type = bfd_link_hash_defined;
1894 }
1895 else
1896 h = NULL;
1897 }
1898 else
1899 {
1900 isym = NULL;
1901 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1902 while (h->root.type == bfd_link_hash_indirect
1903 || h->root.type == bfd_link_hash_warning)
1904 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1905 }
1906
1907 /* Check invalid x32 relocations. */
1908 if (!ABI_64_P (abfd))
1909 switch (r_type)
1910 {
1911 default:
1912 break;
1913
1914 case R_X86_64_DTPOFF64:
1915 case R_X86_64_TPOFF64:
1916 case R_X86_64_PC64:
1917 case R_X86_64_GOTOFF64:
1918 case R_X86_64_GOT64:
1919 case R_X86_64_GOTPCREL64:
1920 case R_X86_64_GOTPC64:
1921 case R_X86_64_GOTPLT64:
1922 case R_X86_64_PLTOFF64:
1923 {
1924 if (h)
1925 name = h->root.root.string;
1926 else
1927 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1928 NULL);
1929 _bfd_error_handler
1930 /* xgettext:c-format */
1931 (_("%pB: relocation %s against symbol `%s' isn't "
1932 "supported in x32 mode"), abfd,
1933 x86_64_elf_howto_table[r_type].name, name);
1934 bfd_set_error (bfd_error_bad_value);
1935 goto error_return;
1936 }
1937 break;
1938 }
1939
1940 if (h != NULL)
1941 {
1942 /* It is referenced by a non-shared object. */
1943 h->ref_regular = 1;
1944
1945 if (h->type == STT_GNU_IFUNC)
1946 elf_tdata (info->output_bfd)->has_gnu_symbols
1947 |= elf_gnu_symbol_ifunc;
1948 }
1949
1950 converted_reloc = FALSE;
1951 if ((r_type == R_X86_64_GOTPCREL
1952 || r_type == R_X86_64_GOTPCRELX
1953 || r_type == R_X86_64_REX_GOTPCRELX)
1954 && (h == NULL || h->type != STT_GNU_IFUNC))
1955 {
1956 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1957 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1958 irel, h, &converted_reloc,
1959 info))
1960 goto error_return;
1961
1962 if (converted_reloc)
1963 converted = TRUE;
1964 }
1965
1966 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1967 symtab_hdr, sym_hashes,
1968 &r_type, GOT_UNKNOWN,
1969 rel, rel_end, h, r_symndx, FALSE))
1970 goto error_return;
1971
1972 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1973 if (h == htab->elf.hgot)
1974 htab->got_referenced = TRUE;
1975
1976 eh = (struct elf_x86_link_hash_entry *) h;
1977 switch (r_type)
1978 {
1979 case R_X86_64_TLSLD:
1980 htab->tls_ld_or_ldm_got.refcount = 1;
1981 goto create_got;
1982
1983 case R_X86_64_TPOFF32:
1984 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1985 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1986 &x86_64_elf_howto_table[r_type]);
1987 if (eh != NULL)
1988 eh->zero_undefweak &= 0x2;
1989 break;
1990
1991 case R_X86_64_GOTTPOFF:
1992 if (!bfd_link_executable (info))
1993 info->flags |= DF_STATIC_TLS;
1994 /* Fall through */
1995
1996 case R_X86_64_GOT32:
1997 case R_X86_64_GOTPCREL:
1998 case R_X86_64_GOTPCRELX:
1999 case R_X86_64_REX_GOTPCRELX:
2000 case R_X86_64_TLSGD:
2001 case R_X86_64_GOT64:
2002 case R_X86_64_GOTPCREL64:
2003 case R_X86_64_GOTPLT64:
2004 case R_X86_64_GOTPC32_TLSDESC:
2005 case R_X86_64_TLSDESC_CALL:
2006 /* This symbol requires a global offset table entry. */
2007 {
2008 int tls_type, old_tls_type;
2009
2010 switch (r_type)
2011 {
2012 default: tls_type = GOT_NORMAL; break;
2013 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2014 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2015 case R_X86_64_GOTPC32_TLSDESC:
2016 case R_X86_64_TLSDESC_CALL:
2017 tls_type = GOT_TLS_GDESC; break;
2018 }
2019
2020 if (h != NULL)
2021 {
2022 h->got.refcount = 1;
2023 old_tls_type = eh->tls_type;
2024 }
2025 else
2026 {
2027 bfd_signed_vma *local_got_refcounts;
2028
2029 /* This is a global offset table entry for a local symbol. */
2030 local_got_refcounts = elf_local_got_refcounts (abfd);
2031 if (local_got_refcounts == NULL)
2032 {
2033 bfd_size_type size;
2034
2035 size = symtab_hdr->sh_info;
2036 size *= sizeof (bfd_signed_vma)
2037 + sizeof (bfd_vma) + sizeof (char);
2038 local_got_refcounts = ((bfd_signed_vma *)
2039 bfd_zalloc (abfd, size));
2040 if (local_got_refcounts == NULL)
2041 goto error_return;
2042 elf_local_got_refcounts (abfd) = local_got_refcounts;
2043 elf_x86_local_tlsdesc_gotent (abfd)
2044 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2045 elf_x86_local_got_tls_type (abfd)
2046 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2047 }
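/* The allocation above packs three per-local-symbol arrays back to
   back: the GOT refcounts, then the TLSDESC GOT offsets, then one
   TLS-type byte per symbol, as the two pointer assignments show.  */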
2048 local_got_refcounts[r_symndx] = 1;
2049 old_tls_type
2050 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2051 }
2052
2053 /* If a TLS symbol is accessed using IE at least once,
2054 there is no point in using a dynamic model for it.  */
2055 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2056 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2057 || tls_type != GOT_TLS_IE))
2058 {
2059 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2060 tls_type = old_tls_type;
2061 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2062 && GOT_TLS_GD_ANY_P (tls_type))
2063 tls_type |= old_tls_type;
2064 else
2065 {
2066 if (h)
2067 name = h->root.root.string;
2068 else
2069 name = bfd_elf_sym_name (abfd, symtab_hdr,
2070 isym, NULL);
2071 _bfd_error_handler
2072 /* xgettext:c-format */
2073 (_("%pB: '%s' accessed both as normal and"
2074 " thread local symbol"),
2075 abfd, name);
2076 bfd_set_error (bfd_error_bad_value);
2077 goto error_return;
2078 }
2079 }
2080
2081 if (old_tls_type != tls_type)
2082 {
2083 if (eh != NULL)
2084 eh->tls_type = tls_type;
2085 else
2086 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2087 }
2088 }
2089 /* Fall through */
2090
2091 case R_X86_64_GOTOFF64:
2092 case R_X86_64_GOTPC32:
2093 case R_X86_64_GOTPC64:
2094 create_got:
2095 if (eh != NULL)
2096 eh->zero_undefweak &= 0x2;
2097 break;
2098
2099 case R_X86_64_PLT32:
2100 case R_X86_64_PLT32_BND:
2101 /* This symbol requires a procedure linkage table entry. We
2102 actually build the entry in adjust_dynamic_symbol,
2103 because this might be a case of linking PIC code which is
2104 never referenced by a dynamic object, in which case we
2105 don't need to generate a procedure linkage table entry
2106 after all. */
2107
2108 /* If this is a local symbol, we resolve it directly without
2109 creating a procedure linkage table entry. */
2110 if (h == NULL)
2111 continue;
2112
2113 eh->zero_undefweak &= 0x2;
2114 h->needs_plt = 1;
2115 h->plt.refcount = 1;
2116 break;
2117
2118 case R_X86_64_PLTOFF64:
2119 /* This tries to form the 'address' of a function relative
2120 to GOT. For global symbols we need a PLT entry. */
2121 if (h != NULL)
2122 {
2123 h->needs_plt = 1;
2124 h->plt.refcount = 1;
2125 }
2126 goto create_got;
2127
2128 case R_X86_64_SIZE32:
2129 case R_X86_64_SIZE64:
2130 size_reloc = TRUE;
2131 goto do_size;
2132
2133 case R_X86_64_32:
2134 if (!ABI_64_P (abfd))
2135 goto pointer;
2136 /* Fall through. */
2137 case R_X86_64_8:
2138 case R_X86_64_16:
2139 case R_X86_64_32S:
2140 /* Check relocation overflow as these relocs may lead to
2141 run-time relocation overflow. Don't error out for
2142 sections we don't care about, such as debug sections or
2143 when relocation overflow check is disabled. */
2144 if (!info->no_reloc_overflow_check
2145 && !converted_reloc
2146 && (bfd_link_pic (info)
2147 || (bfd_link_executable (info)
2148 && h != NULL
2149 && !h->def_regular
2150 && h->def_dynamic
2151 && (sec->flags & SEC_READONLY) == 0)))
2152 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2153 &x86_64_elf_howto_table[r_type]);
2154 /* Fall through. */
2155
2156 case R_X86_64_PC8:
2157 case R_X86_64_PC16:
2158 case R_X86_64_PC32:
2159 case R_X86_64_PC32_BND:
2160 case R_X86_64_PC64:
2161 case R_X86_64_64:
2162 pointer:
2163 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2164 eh->zero_undefweak |= 0x2;
2165 /* We are called after all symbols have been resolved.  Only
2166 relocations against STT_GNU_IFUNC symbols must go through
2167 the PLT.  */
2168 if (h != NULL
2169 && (bfd_link_executable (info)
2170 || h->type == STT_GNU_IFUNC))
2171 {
2172 bfd_boolean func_pointer_ref = FALSE;
2173
2174 if (r_type == R_X86_64_PC32)
2175 {
2176 /* Since something like ".long foo - ." may be used
2177 as pointer, make sure that PLT is used if foo is
2178 a function defined in a shared library. */
2179 if ((sec->flags & SEC_CODE) == 0)
2180 {
2181 h->pointer_equality_needed = 1;
2182 if (bfd_link_pie (info)
2183 && h->type == STT_FUNC
2184 && !h->def_regular
2185 && h->def_dynamic)
2186 {
2187 h->needs_plt = 1;
2188 h->plt.refcount = 1;
2189 }
2190 }
2191 }
2192 else if (r_type != R_X86_64_PC32_BND
2193 && r_type != R_X86_64_PC64)
2194 {
2195 h->pointer_equality_needed = 1;
2196 /* At run-time, R_X86_64_64 can be resolved for both
2197 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2198 can only be resolved for x32. */
2199 if ((sec->flags & SEC_READONLY) == 0
2200 && (r_type == R_X86_64_64
2201 || (!ABI_64_P (abfd)
2202 && (r_type == R_X86_64_32
2203 || r_type == R_X86_64_32S))))
2204 func_pointer_ref = TRUE;
2205 }
2206
2207 if (!func_pointer_ref)
2208 {
2209 /* If this reloc is in a read-only section, we might
2210 need a copy reloc. We can't check reliably at this
2211 stage whether the section is read-only, as input
2212 sections have not yet been mapped to output sections.
2213 Tentatively set the flag for now, and correct in
2214 adjust_dynamic_symbol. */
2215 h->non_got_ref = 1;
2216
2217 /* We may need a .plt entry if the symbol is a function
2218 defined in a shared lib or is a function referenced
2219 from the code or read-only section. */
2220 if (!h->def_regular
2221 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2222 h->plt.refcount = 1;
2223 }
2224 }
2225
2226 size_reloc = FALSE;
2227 do_size:
2228 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2229 htab->pointer_r_type))
2230 {
2231 struct elf_dyn_relocs *p;
2232 struct elf_dyn_relocs **head;
2233
2234 /* We must copy these reloc types into the output file.
2235 Create a reloc section in dynobj and make room for
2236 this reloc. */
2237 if (sreloc == NULL)
2238 {
2239 sreloc = _bfd_elf_make_dynamic_reloc_section
2240 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2241 abfd, /*rela?*/ TRUE);
2242
2243 if (sreloc == NULL)
2244 goto error_return;
2245 }
2246
2247 /* If this is a global symbol, we count the number of
2248 relocations we need for this symbol. */
2249 if (h != NULL)
2250 head = &eh->dyn_relocs;
2251 else
2252 {
2253 /* Track dynamic relocs needed for local syms too.
2254 We really need local syms available to do this
2255 easily. Oh well. */
2256 asection *s;
2257 void **vpp;
2258
2259 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2260 abfd, r_symndx);
2261 if (isym == NULL)
2262 goto error_return;
2263
2264 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2265 if (s == NULL)
2266 s = sec;
2267
2268 /* Beware of type punned pointers vs strict aliasing
2269 rules. */
2270 vpp = &(elf_section_data (s)->local_dynrel);
2271 head = (struct elf_dyn_relocs **)vpp;
2272 }
2273
2274 p = *head;
2275 if (p == NULL || p->sec != sec)
2276 {
2277 bfd_size_type amt = sizeof *p;
2278
2279 p = ((struct elf_dyn_relocs *)
2280 bfd_alloc (htab->elf.dynobj, amt));
2281 if (p == NULL)
2282 goto error_return;
2283 p->next = *head;
2284 *head = p;
2285 p->sec = sec;
2286 p->count = 0;
2287 p->pc_count = 0;
2288 }
2289
2290 p->count += 1;
2291 /* Count size relocation as PC-relative relocation. */
2292 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2293 p->pc_count += 1;
2294 }
2295 break;
2296
2297 /* This relocation describes the C++ object vtable hierarchy.
2298 Reconstruct it for later use during GC. */
2299 case R_X86_64_GNU_VTINHERIT:
2300 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2301 goto error_return;
2302 break;
2303
2304 /* This relocation describes which C++ vtable entries are actually
2305 used. Record for later use during GC. */
2306 case R_X86_64_GNU_VTENTRY:
2307 BFD_ASSERT (h != NULL);
2308 if (h != NULL
2309 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2310 goto error_return;
2311 break;
2312
2313 default:
2314 break;
2315 }
2316 }
2317
2318 if (elf_section_data (sec)->this_hdr.contents != contents)
2319 {
2320 if (!converted && !info->keep_memory)
2321 free (contents);
2322 else
2323 {
2324 /* Cache the section contents for elf_link_input_bfd if any
2325 load is converted or --no-keep-memory isn't used. */
2326 elf_section_data (sec)->this_hdr.contents = contents;
2327 }
2328 }
2329
2330 /* Cache relocations if any load is converted. */
2331 if (elf_section_data (sec)->relocs != relocs && converted)
2332 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2333
2334 return TRUE;
2335
2336 error_return:
2337 if (elf_section_data (sec)->this_hdr.contents != contents)
2338 free (contents);
2339 sec->check_relocs_failed = 1;
2340 return FALSE;
2341 }
2342
2343 /* Return the relocation value for @tpoff relocation
2344 if STT_TLS virtual address is ADDRESS. */
2345
2346 static bfd_vma
2347 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2348 {
2349 struct elf_link_hash_table *htab = elf_hash_table (info);
2350 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2351 bfd_vma static_tls_size;
2352
2353 /* If tls_sec is NULL, we should have signalled an error already.  */
2354 if (htab->tls_sec == NULL)
2355 return 0;
2356
2357 /* Consider special static TLS alignment requirements. */
2358 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
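/* For instance, with the TLS segment at vma V and an aligned static
   TLS size S, a variable at address A yields A - (V + S): a negative
   offset from the thread pointer, which on x86-64 points just past
   the static TLS block.  */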
2359 return address - static_tls_size - htab->tls_sec->vma;
2360 }
2361
2362 /* Relocate an x86_64 ELF section. */
2363
2364 static bfd_boolean
2365 elf_x86_64_relocate_section (bfd *output_bfd,
2366 struct bfd_link_info *info,
2367 bfd *input_bfd,
2368 asection *input_section,
2369 bfd_byte *contents,
2370 Elf_Internal_Rela *relocs,
2371 Elf_Internal_Sym *local_syms,
2372 asection **local_sections)
2373 {
2374 struct elf_x86_link_hash_table *htab;
2375 Elf_Internal_Shdr *symtab_hdr;
2376 struct elf_link_hash_entry **sym_hashes;
2377 bfd_vma *local_got_offsets;
2378 bfd_vma *local_tlsdesc_gotents;
2379 Elf_Internal_Rela *rel;
2380 Elf_Internal_Rela *wrel;
2381 Elf_Internal_Rela *relend;
2382 unsigned int plt_entry_size;
2383
2384 /* Skip if check_relocs failed. */
2385 if (input_section->check_relocs_failed)
2386 return FALSE;
2387
2388 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2389 if (htab == NULL)
2390 return FALSE;
2391
2392 if (!is_x86_elf (input_bfd, htab))
2393 {
2394 bfd_set_error (bfd_error_wrong_format);
2395 return FALSE;
2396 }
2397
2398 plt_entry_size = htab->plt.plt_entry_size;
2399 symtab_hdr = &elf_symtab_hdr (input_bfd);
2400 sym_hashes = elf_sym_hashes (input_bfd);
2401 local_got_offsets = elf_local_got_offsets (input_bfd);
2402 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2403
2404 _bfd_x86_elf_set_tls_module_base (info);
2405
2406 rel = wrel = relocs;
2407 relend = relocs + input_section->reloc_count;
2408 for (; rel < relend; wrel++, rel++)
2409 {
2410 unsigned int r_type, r_type_tls;
2411 reloc_howto_type *howto;
2412 unsigned long r_symndx;
2413 struct elf_link_hash_entry *h;
2414 struct elf_x86_link_hash_entry *eh;
2415 Elf_Internal_Sym *sym;
2416 asection *sec;
2417 bfd_vma off, offplt, plt_offset;
2418 bfd_vma relocation;
2419 bfd_boolean unresolved_reloc;
2420 bfd_reloc_status_type r;
2421 int tls_type;
2422 asection *base_got, *resolved_plt;
2423 bfd_vma st_size;
2424 bfd_boolean resolved_to_zero;
2425 bfd_boolean relative_reloc;
2426 bfd_boolean converted_reloc;
2427 bfd_boolean need_copy_reloc_in_pie;
2428
2429 r_type = ELF32_R_TYPE (rel->r_info);
2430 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2431 || r_type == (int) R_X86_64_GNU_VTENTRY)
2432 {
2433 if (wrel != rel)
2434 *wrel = *rel;
2435 continue;
2436 }
2437
2438 r_symndx = htab->r_sym (rel->r_info);
2439 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2440 if (converted_reloc)
2441 {
2442 r_type &= ~R_X86_64_converted_reloc_bit;
2443 rel->r_info = htab->r_info (r_symndx, r_type);
2444 }
2445
2446 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2447 if (howto == NULL)
2448 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2449
2450 h = NULL;
2451 sym = NULL;
2452 sec = NULL;
2453 unresolved_reloc = FALSE;
2454 if (r_symndx < symtab_hdr->sh_info)
2455 {
2456 sym = local_syms + r_symndx;
2457 sec = local_sections[r_symndx];
2458
2459 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2460 &sec, rel);
2461 st_size = sym->st_size;
2462
2463 /* Relocate against local STT_GNU_IFUNC symbol. */
2464 if (!bfd_link_relocatable (info)
2465 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2466 {
2467 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2468 rel, FALSE);
2469 if (h == NULL)
2470 abort ();
2471
2472 /* Set STT_GNU_IFUNC symbol value. */
2473 h->root.u.def.value = sym->st_value;
2474 h->root.u.def.section = sec;
2475 }
2476 }
2477 else
2478 {
2479 bfd_boolean warned ATTRIBUTE_UNUSED;
2480 bfd_boolean ignored ATTRIBUTE_UNUSED;
2481
2482 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2483 r_symndx, symtab_hdr, sym_hashes,
2484 h, sec, relocation,
2485 unresolved_reloc, warned, ignored);
2486 st_size = h->size;
2487 }
2488
2489 if (sec != NULL && discarded_section (sec))
2490 {
2491 _bfd_clear_contents (howto, input_bfd, input_section,
2492 contents, rel->r_offset);
2493 wrel->r_offset = rel->r_offset;
2494 wrel->r_info = 0;
2495 wrel->r_addend = 0;
2496
2497 /* For ld -r, remove relocations in debug sections against
2498 sections defined in discarded sections.  Not done for
2499 .eh_frame, since its editing code expects relocations to be present.  */
2500 if (bfd_link_relocatable (info)
2501 && (input_section->flags & SEC_DEBUGGING))
2502 wrel--;
2503
2504 continue;
2505 }
2506
2507 if (bfd_link_relocatable (info))
2508 {
2509 if (wrel != rel)
2510 *wrel = *rel;
2511 continue;
2512 }
2513
2514 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2515 {
2516 if (r_type == R_X86_64_64)
2517 {
2518 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2519 zero-extend it to 64bit if addend is zero. */
2520 r_type = R_X86_64_32;
2521 memset (contents + rel->r_offset + 4, 0, 4);
2522 }
2523 else if (r_type == R_X86_64_SIZE64)
2524 {
2525 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2526 zero-extend it to 64bit if addend is zero. */
2527 r_type = R_X86_64_SIZE32;
2528 memset (contents + rel->r_offset + 4, 0, 4);
2529 }
2530 }
2531
2532 eh = (struct elf_x86_link_hash_entry *) h;
2533
2534 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2535 it here if it is defined in a non-shared object. */
2536 if (h != NULL
2537 && h->type == STT_GNU_IFUNC
2538 && h->def_regular)
2539 {
2540 bfd_vma plt_index;
2541 const char *name;
2542
2543 if ((input_section->flags & SEC_ALLOC) == 0)
2544 {
2545 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2546 STT_GNU_IFUNC symbol as STT_FUNC. */
2547 if (elf_section_type (input_section) == SHT_NOTE)
2548 goto skip_ifunc;
2549 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2550 sections because such sections are not SEC_ALLOC and
2551 thus ld.so will not process them. */
2552 if ((input_section->flags & SEC_DEBUGGING) != 0)
2553 continue;
2554 abort ();
2555 }
2556
2557 switch (r_type)
2558 {
2559 default:
2560 break;
2561
2562 case R_X86_64_GOTPCREL:
2563 case R_X86_64_GOTPCRELX:
2564 case R_X86_64_REX_GOTPCRELX:
2565 case R_X86_64_GOTPCREL64:
2566 base_got = htab->elf.sgot;
2567 off = h->got.offset;
2568
2569 if (base_got == NULL)
2570 abort ();
2571
2572 if (off == (bfd_vma) -1)
2573 {
2574 /* We can't use h->got.offset here to save state, or
2575 even just remember the offset, as finish_dynamic_symbol
2576 would use that as offset into .got. */
2577
2578 if (h->plt.offset == (bfd_vma) -1)
2579 abort ();
2580
2581 if (htab->elf.splt != NULL)
2582 {
2583 plt_index = (h->plt.offset / plt_entry_size
2584 - htab->plt.has_plt0);
2585 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2586 base_got = htab->elf.sgotplt;
2587 }
2588 else
2589 {
2590 plt_index = h->plt.offset / plt_entry_size;
2591 off = plt_index * GOT_ENTRY_SIZE;
2592 base_got = htab->elf.igotplt;
2593 }
2594
2595 if (h->dynindx == -1
2596 || h->forced_local
2597 || info->symbolic)
2598 {
2599 /* This references the local definition.  We must
2600 initialize this entry in the global offset table.
2601 Since the offset must always be a multiple of 8,
2602 we use the least significant bit to record
2603 whether we have initialized it already.
2604
2605 When doing a dynamic link, we create a .rela.got
2606 relocation entry to initialize the value. This
2607 is done in the finish_dynamic_symbol routine. */
2608 if ((off & 1) != 0)
2609 off &= ~1;
2610 else
2611 {
2612 bfd_put_64 (output_bfd, relocation,
2613 base_got->contents + off);
2614 /* Note that this is harmless for the GOTPLT64
2615 case, as -1 | 1 still is -1. */
2616 h->got.offset |= 1;
2617 }
2618 }
2619 }
2620
2621 relocation = (base_got->output_section->vma
2622 + base_got->output_offset + off);
2623
2624 goto do_relocation;
2625 }
2626
2627 if (h->plt.offset == (bfd_vma) -1)
2628 {
2629 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2630 if (r_type == htab->pointer_r_type
2631 && (input_section->flags & SEC_CODE) == 0)
2632 goto do_ifunc_pointer;
2633 goto bad_ifunc_reloc;
2634 }
2635
2636 /* STT_GNU_IFUNC symbol must go through PLT. */
2637 if (htab->elf.splt != NULL)
2638 {
2639 if (htab->plt_second != NULL)
2640 {
2641 resolved_plt = htab->plt_second;
2642 plt_offset = eh->plt_second.offset;
2643 }
2644 else
2645 {
2646 resolved_plt = htab->elf.splt;
2647 plt_offset = h->plt.offset;
2648 }
2649 }
2650 else
2651 {
2652 resolved_plt = htab->elf.iplt;
2653 plt_offset = h->plt.offset;
2654 }
2655
2656 relocation = (resolved_plt->output_section->vma
2657 + resolved_plt->output_offset + plt_offset);
2658
2659 switch (r_type)
2660 {
2661 default:
2662 bad_ifunc_reloc:
2663 if (h->root.root.string)
2664 name = h->root.root.string;
2665 else
2666 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2667 NULL);
2668 _bfd_error_handler
2669 /* xgettext:c-format */
2670 (_("%pB: relocation %s against STT_GNU_IFUNC "
2671 "symbol `%s' isn't supported"), input_bfd,
2672 howto->name, name);
2673 bfd_set_error (bfd_error_bad_value);
2674 return FALSE;
2675
2676 case R_X86_64_32S:
2677 if (bfd_link_pic (info))
2678 abort ();
2679 goto do_relocation;
2680
2681 case R_X86_64_32:
2682 if (ABI_64_P (output_bfd))
2683 goto do_relocation;
2684 /* FALLTHROUGH */
2685 case R_X86_64_64:
2686 do_ifunc_pointer:
2687 if (rel->r_addend != 0)
2688 {
2689 if (h->root.root.string)
2690 name = h->root.root.string;
2691 else
2692 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2693 sym, NULL);
2694 _bfd_error_handler
2695 /* xgettext:c-format */
2696 (_("%pB: relocation %s against STT_GNU_IFUNC "
2697 "symbol `%s' has non-zero addend: %" PRId64),
2698 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2699 bfd_set_error (bfd_error_bad_value);
2700 return FALSE;
2701 }
2702
2703 /* Generate a dynamic relocation only when there is a
2704 non-GOT reference in a shared object or there is no
2705 PLT. */
2706 if ((bfd_link_pic (info) && h->non_got_ref)
2707 || h->plt.offset == (bfd_vma) -1)
2708 {
2709 Elf_Internal_Rela outrel;
2710 asection *sreloc;
2711
2712 /* Need a dynamic relocation to get the real function
2713 address. */
2714 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2715 info,
2716 input_section,
2717 rel->r_offset);
2718 if (outrel.r_offset == (bfd_vma) -1
2719 || outrel.r_offset == (bfd_vma) -2)
2720 abort ();
2721
2722 outrel.r_offset += (input_section->output_section->vma
2723 + input_section->output_offset);
2724
2725 if (POINTER_LOCAL_IFUNC_P (info, h))
2726 {
2727 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2728 h->root.root.string,
2729 h->root.u.def.section->owner);
2730
2731 /* This symbol is resolved locally. */
2732 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2733 outrel.r_addend = (h->root.u.def.value
2734 + h->root.u.def.section->output_section->vma
2735 + h->root.u.def.section->output_offset);
2736 }
2737 else
2738 {
2739 outrel.r_info = htab->r_info (h->dynindx, r_type);
2740 outrel.r_addend = 0;
2741 }
2742
2743 /* Dynamic relocations are stored in
2744 1. .rela.ifunc section in PIC object.
2745 2. .rela.got section in dynamic executable.
2746 3. .rela.iplt section in static executable. */
2747 if (bfd_link_pic (info))
2748 sreloc = htab->elf.irelifunc;
2749 else if (htab->elf.splt != NULL)
2750 sreloc = htab->elf.srelgot;
2751 else
2752 sreloc = htab->elf.irelplt;
2753 elf_append_rela (output_bfd, sreloc, &outrel);
2754
2755 /* If this reloc is against an external symbol, we
2756 do not want to fiddle with the addend. Otherwise,
2757 we need to include the symbol value so that it
2758 becomes an addend for the dynamic reloc. For an
2759 internal symbol, we have updated addend. */
2760 continue;
2761 }
2762 /* FALLTHROUGH */
2763 case R_X86_64_PC32:
2764 case R_X86_64_PC32_BND:
2765 case R_X86_64_PC64:
2766 case R_X86_64_PLT32:
2767 case R_X86_64_PLT32_BND:
2768 goto do_relocation;
2769 }
2770 }
2771
2772 skip_ifunc:
2773 resolved_to_zero = (eh != NULL
2774 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2775
2776 /* When generating a shared object, the relocations handled here are
2777 copied into the output file to be resolved at run time. */
2778 switch (r_type)
2779 {
2780 case R_X86_64_GOT32:
2781 case R_X86_64_GOT64:
2782 /* Relocation is to the entry for this symbol in the global
2783 offset table. */
2784 case R_X86_64_GOTPCREL:
2785 case R_X86_64_GOTPCRELX:
2786 case R_X86_64_REX_GOTPCRELX:
2787 case R_X86_64_GOTPCREL64:
2788 /* Use global offset table entry as symbol value. */
2789 case R_X86_64_GOTPLT64:
2790 /* This is obsolete and treated the same as GOT64. */
2791 base_got = htab->elf.sgot;
2792
2793 if (htab->elf.sgot == NULL)
2794 abort ();
2795
2796 relative_reloc = FALSE;
2797 if (h != NULL)
2798 {
2799 off = h->got.offset;
2800 if (h->needs_plt
2801 && h->plt.offset != (bfd_vma)-1
2802 && off == (bfd_vma)-1)
2803 {
2804 /* We can't use h->got.offset here to save
2805 state, or even just remember the offset, as
2806 finish_dynamic_symbol would use that as offset into
2807 .got. */
2808 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2809 - htab->plt.has_plt0);
2810 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2811 base_got = htab->elf.sgotplt;
2812 }
2813
2814 if (RESOLVED_LOCALLY_P (info, h, htab))
2815 {
2816 /* We must initialize this entry in the global offset
2817 table. Since the offset must always be a multiple
2818 of 8, we use the least significant bit to record
2819 whether we have initialized it already.
2820
2821 When doing a dynamic link, we create a .rela.got
2822 relocation entry to initialize the value. This is
2823 done in the finish_dynamic_symbol routine. */
2824 if ((off & 1) != 0)
2825 off &= ~1;
2826 else
2827 {
2828 bfd_put_64 (output_bfd, relocation,
2829 base_got->contents + off);
2830 /* Note that this is harmless for the GOTPLT64 case,
2831 as -1 | 1 still is -1. */
2832 h->got.offset |= 1;
2833
2834 if (GENERATE_RELATIVE_RELOC_P (info, h))
2835 {
2836 /* If this symbol isn't dynamic in PIC,
2837 generate R_X86_64_RELATIVE here. */
2838 eh->no_finish_dynamic_symbol = 1;
2839 relative_reloc = TRUE;
2840 }
2841 }
2842 }
2843 else
2844 unresolved_reloc = FALSE;
2845 }
2846 else
2847 {
2848 if (local_got_offsets == NULL)
2849 abort ();
2850
2851 off = local_got_offsets[r_symndx];
2852
2853 /* The offset must always be a multiple of 8. We use
2854 the least significant bit to record whether we have
2855 already generated the necessary reloc. */
2856 if ((off & 1) != 0)
2857 off &= ~1;
2858 else
2859 {
2860 bfd_put_64 (output_bfd, relocation,
2861 base_got->contents + off);
2862 local_got_offsets[r_symndx] |= 1;
2863
2864 if (bfd_link_pic (info))
2865 relative_reloc = TRUE;
2866 }
2867 }
2868
2869 if (relative_reloc)
2870 {
2871 asection *s;
2872 Elf_Internal_Rela outrel;
2873
2874 /* We need to generate a R_X86_64_RELATIVE reloc
2875 for the dynamic linker. */
2876 s = htab->elf.srelgot;
2877 if (s == NULL)
2878 abort ();
2879
2880 outrel.r_offset = (base_got->output_section->vma
2881 + base_got->output_offset
2882 + off);
2883 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2884 outrel.r_addend = relocation;
2885 elf_append_rela (output_bfd, s, &outrel);
2886 }
2887
2888 if (off >= (bfd_vma) -2)
2889 abort ();
2890
2891 relocation = base_got->output_section->vma
2892 + base_got->output_offset + off;
2893 if (r_type != R_X86_64_GOTPCREL
2894 && r_type != R_X86_64_GOTPCRELX
2895 && r_type != R_X86_64_REX_GOTPCRELX
2896 && r_type != R_X86_64_GOTPCREL64)
2897 relocation -= htab->elf.sgotplt->output_section->vma
2898 - htab->elf.sgotplt->output_offset;
2899
2900 break;
2901
2902 case R_X86_64_GOTOFF64:
2903 /* Relocation is relative to the start of the global offset
2904 table. */
2905
2906 /* Check to make sure it isn't a protected function or data
2907 symbol for a shared library, since it may not be local when
2908 used as a function address or with a copy relocation.  We also
2909 need to make sure that the symbol is referenced locally.  */
2910 if (bfd_link_pic (info) && h)
2911 {
2912 if (!h->def_regular)
2913 {
2914 const char *v;
2915
2916 switch (ELF_ST_VISIBILITY (h->other))
2917 {
2918 case STV_HIDDEN:
2919 v = _("hidden symbol");
2920 break;
2921 case STV_INTERNAL:
2922 v = _("internal symbol");
2923 break;
2924 case STV_PROTECTED:
2925 v = _("protected symbol");
2926 break;
2927 default:
2928 v = _("symbol");
2929 break;
2930 }
2931
2932 _bfd_error_handler
2933 /* xgettext:c-format */
2934 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2935 " `%s' can not be used when making a shared object"),
2936 input_bfd, v, h->root.root.string);
2937 bfd_set_error (bfd_error_bad_value);
2938 return FALSE;
2939 }
2940 else if (!bfd_link_executable (info)
2941 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2942 && (h->type == STT_FUNC
2943 || h->type == STT_OBJECT)
2944 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2945 {
2946 _bfd_error_handler
2947 /* xgettext:c-format */
2948 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2949 " `%s' can not be used when making a shared object"),
2950 input_bfd,
2951 h->type == STT_FUNC ? "function" : "data",
2952 h->root.root.string);
2953 bfd_set_error (bfd_error_bad_value);
2954 return FALSE;
2955 }
2956 }
2957
2958 /* Note that sgot is not involved in this
2959 calculation. We always want the start of .got.plt. If we
2960 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2961 permitted by the ABI, we might have to change this
2962 calculation. */
2963 relocation -= htab->elf.sgotplt->output_section->vma
2964 + htab->elf.sgotplt->output_offset;
2965 break;
2966
2967 case R_X86_64_GOTPC32:
2968 case R_X86_64_GOTPC64:
2969 /* Use global offset table as symbol value. */
2970 relocation = htab->elf.sgotplt->output_section->vma
2971 + htab->elf.sgotplt->output_offset;
2972 unresolved_reloc = FALSE;
2973 break;
2974
2975 case R_X86_64_PLTOFF64:
2976 /* Relocation is PLT entry relative to GOT. For local
2977 symbols it's the symbol itself relative to GOT. */
2978 if (h != NULL
2979 /* See PLT32 handling. */
2980 && (h->plt.offset != (bfd_vma) -1
2981 || eh->plt_got.offset != (bfd_vma) -1)
2982 && htab->elf.splt != NULL)
2983 {
2984 if (eh->plt_got.offset != (bfd_vma) -1)
2985 {
2986 /* Use the GOT PLT. */
2987 resolved_plt = htab->plt_got;
2988 plt_offset = eh->plt_got.offset;
2989 }
2990 else if (htab->plt_second != NULL)
2991 {
2992 resolved_plt = htab->plt_second;
2993 plt_offset = eh->plt_second.offset;
2994 }
2995 else
2996 {
2997 resolved_plt = htab->elf.splt;
2998 plt_offset = h->plt.offset;
2999 }
3000
3001 relocation = (resolved_plt->output_section->vma
3002 + resolved_plt->output_offset
3003 + plt_offset);
3004 unresolved_reloc = FALSE;
3005 }
3006
3007 relocation -= htab->elf.sgotplt->output_section->vma
3008 + htab->elf.sgotplt->output_offset;
3009 break;
3010
3011 case R_X86_64_PLT32:
3012 case R_X86_64_PLT32_BND:
3013 /* Relocation is to the entry for this symbol in the
3014 procedure linkage table. */
3015
3016 /* Resolve a PLT32 reloc against a local symbol directly,
3017 without using the procedure linkage table. */
3018 if (h == NULL)
3019 break;
3020
3021 if ((h->plt.offset == (bfd_vma) -1
3022 && eh->plt_got.offset == (bfd_vma) -1)
3023 || htab->elf.splt == NULL)
3024 {
3025 /* We didn't make a PLT entry for this symbol. This
3026 happens when statically linking PIC code, or when
3027 using -Bsymbolic. */
3028 break;
3029 }
3030
3031 use_plt:
3032 if (h->plt.offset != (bfd_vma) -1)
3033 {
3034 if (htab->plt_second != NULL)
3035 {
3036 resolved_plt = htab->plt_second;
3037 plt_offset = eh->plt_second.offset;
3038 }
3039 else
3040 {
3041 resolved_plt = htab->elf.splt;
3042 plt_offset = h->plt.offset;
3043 }
3044 }
3045 else
3046 {
3047 /* Use the GOT PLT. */
3048 resolved_plt = htab->plt_got;
3049 plt_offset = eh->plt_got.offset;
3050 }
3051
3052 relocation = (resolved_plt->output_section->vma
3053 + resolved_plt->output_offset
3054 + plt_offset);
3055 unresolved_reloc = FALSE;
3056 break;
3057
3058 case R_X86_64_SIZE32:
3059 case R_X86_64_SIZE64:
3060 /* Set to symbol size. */
3061 relocation = st_size;
3062 goto direct;
3063
3064 case R_X86_64_PC8:
3065 case R_X86_64_PC16:
3066 case R_X86_64_PC32:
3067 case R_X86_64_PC32_BND:
3068 /* Don't complain about -fPIC if the symbol is undefined when
3069 building an executable, unless it is an unresolved weak symbol,
3070 references a dynamic definition in a PIE, or -z nocopyreloc
3071 is used.  */
3072 if ((input_section->flags & SEC_ALLOC) != 0
3073 && (input_section->flags & SEC_READONLY) != 0
3074 && h != NULL
3075 && ((bfd_link_executable (info)
3076 && ((h->root.type == bfd_link_hash_undefweak
3077 && !resolved_to_zero)
3078 || (bfd_link_pie (info)
3079 && !h->def_regular
3080 && h->def_dynamic)
3081 || ((info->nocopyreloc
3082 || (eh->def_protected
3083 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3084 && h->def_dynamic
3085 && !(h->root.u.def.section->flags & SEC_CODE))))
3086 || bfd_link_dll (info)))
3087 {
3088 bfd_boolean fail = FALSE;
3089 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3090 {
3091 /* Symbol is referenced locally. Make sure it is
3092 defined locally. */
3093 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3094 }
3095 else if (!(bfd_link_pie (info)
3096 && (h->needs_copy || eh->needs_copy)))
3097 {
3098 /* Symbol doesn't need copy reloc and isn't referenced
3099 locally. Address of protected function may not be
3100 reachable at run-time. */
3101 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3102 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3103 && h->type == STT_FUNC));
3104 }
3105
3106 if (fail)
3107 return elf_x86_64_need_pic (info, input_bfd, input_section,
3108 h, NULL, NULL, howto);
3109 }
3110 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3111 as function address. */
3112 else if (h != NULL
3113 && (input_section->flags & SEC_CODE) == 0
3114 && bfd_link_pie (info)
3115 && h->type == STT_FUNC
3116 && !h->def_regular
3117 && h->def_dynamic)
3118 goto use_plt;
3119 /* Fall through. */
3120
3121 case R_X86_64_8:
3122 case R_X86_64_16:
3123 case R_X86_64_32:
3124 case R_X86_64_PC64:
3125 case R_X86_64_64:
3126 /* FIXME: The ABI says the linker should make sure the value is
3127 the same when it's zero-extended to 64 bits.  */
3128
3129 direct:
3130 if ((input_section->flags & SEC_ALLOC) == 0)
3131 break;
3132
3133 need_copy_reloc_in_pie = (bfd_link_pie (info)
3134 && h != NULL
3135 && (h->needs_copy
3136 || eh->needs_copy
3137 || (h->root.type
3138 == bfd_link_hash_undefined))
3139 && (X86_PCREL_TYPE_P (r_type)
3140 || X86_SIZE_TYPE_P (r_type)));
3141
3142 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3143 need_copy_reloc_in_pie,
3144 resolved_to_zero, FALSE))
3145 {
3146 Elf_Internal_Rela outrel;
3147 bfd_boolean skip, relocate;
3148 asection *sreloc;
3149
3150 /* When generating a shared object, these relocations
3151 are copied into the output file to be resolved at run
3152 time. */
3153 skip = FALSE;
3154 relocate = FALSE;
3155
3156 outrel.r_offset =
3157 _bfd_elf_section_offset (output_bfd, info, input_section,
3158 rel->r_offset);
3159 if (outrel.r_offset == (bfd_vma) -1)
3160 skip = TRUE;
3161 else if (outrel.r_offset == (bfd_vma) -2)
3162 skip = TRUE, relocate = TRUE;
3163
3164 outrel.r_offset += (input_section->output_section->vma
3165 + input_section->output_offset);
3166
3167 if (skip)
3168 memset (&outrel, 0, sizeof outrel);
3169
3170 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3171 {
3172 outrel.r_info = htab->r_info (h->dynindx, r_type);
3173 outrel.r_addend = rel->r_addend;
3174 }
3175 else
3176 {
3177 /* This symbol is local, or marked to become local.
3178 When relocation overflow check is disabled, we
3179 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3180 if (r_type == htab->pointer_r_type
3181 || (r_type == R_X86_64_32
3182 && info->no_reloc_overflow_check))
3183 {
3184 relocate = TRUE;
3185 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3186 outrel.r_addend = relocation + rel->r_addend;
3187 }
3188 else if (r_type == R_X86_64_64
3189 && !ABI_64_P (output_bfd))
3190 {
3191 relocate = TRUE;
3192 outrel.r_info = htab->r_info (0,
3193 R_X86_64_RELATIVE64);
3194 outrel.r_addend = relocation + rel->r_addend;
3195 /* Check addend overflow. */
3196 if ((outrel.r_addend & 0x80000000)
3197 != (rel->r_addend & 0x80000000))
3198 {
3199 const char *name;
3200 int addend = rel->r_addend;
3201 if (h && h->root.root.string)
3202 name = h->root.root.string;
3203 else
3204 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3205 sym, NULL);
3206 _bfd_error_handler
3207 /* xgettext:c-format */
3208 (_("%pB: addend %s%#x in relocation %s against "
3209 "symbol `%s' at %#" PRIx64
3210 " in section `%pA' is out of range"),
3211 input_bfd, addend < 0 ? "-" : "", addend,
3212 howto->name, name, (uint64_t) rel->r_offset,
3213 input_section);
3214 bfd_set_error (bfd_error_bad_value);
3215 return FALSE;
3216 }
3217 }
3218 else
3219 {
3220 long sindx;
3221
3222 if (bfd_is_abs_section (sec))
3223 sindx = 0;
3224 else if (sec == NULL || sec->owner == NULL)
3225 {
3226 bfd_set_error (bfd_error_bad_value);
3227 return FALSE;
3228 }
3229 else
3230 {
3231 asection *osec;
3232
3233 /* We are turning this relocation into one
3234 against a section symbol. It would be
3235 proper to subtract the symbol's value,
3236 osec->vma, from the emitted reloc addend,
3237 but ld.so expects buggy relocs. */
3238 osec = sec->output_section;
3239 sindx = elf_section_data (osec)->dynindx;
3240 if (sindx == 0)
3241 {
3242 asection *oi = htab->elf.text_index_section;
3243 sindx = elf_section_data (oi)->dynindx;
3244 }
3245 BFD_ASSERT (sindx != 0);
3246 }
3247
3248 outrel.r_info = htab->r_info (sindx, r_type);
3249 outrel.r_addend = relocation + rel->r_addend;
3250 }
3251 }
3252
3253 sreloc = elf_section_data (input_section)->sreloc;
3254
3255 if (sreloc == NULL || sreloc->contents == NULL)
3256 {
3257 r = bfd_reloc_notsupported;
3258 goto check_relocation_error;
3259 }
3260
3261 elf_append_rela (output_bfd, sreloc, &outrel);
3262
3263 /* If this reloc is against an external symbol, we do
3264 not want to fiddle with the addend. Otherwise, we
3265 need to include the symbol value so that it becomes
3266 an addend for the dynamic reloc. */
3267 if (! relocate)
3268 continue;
3269 }
3270
3271 break;
3272
3273 case R_X86_64_TLSGD:
3274 case R_X86_64_GOTPC32_TLSDESC:
3275 case R_X86_64_TLSDESC_CALL:
3276 case R_X86_64_GOTTPOFF:
3277 tls_type = GOT_UNKNOWN;
3278 if (h == NULL && local_got_offsets)
3279 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3280 else if (h != NULL)
3281 tls_type = elf_x86_hash_entry (h)->tls_type;
3282
3283 r_type_tls = r_type;
3284 if (! elf_x86_64_tls_transition (info, input_bfd,
3285 input_section, contents,
3286 symtab_hdr, sym_hashes,
3287 &r_type_tls, tls_type, rel,
3288 relend, h, r_symndx, TRUE))
3289 return FALSE;
3290
3291 if (r_type_tls == R_X86_64_TPOFF32)
3292 {
3293 bfd_vma roff = rel->r_offset;
3294
3295 BFD_ASSERT (! unresolved_reloc);
3296
3297 if (r_type == R_X86_64_TLSGD)
3298 {
3299 /* GD->LE transition. For 64bit, change
3300 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3301 .word 0x6666; rex64; call __tls_get_addr@PLT
3302 or
3303 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3304 .byte 0x66; rex64
3305 call *__tls_get_addr@GOTPCREL(%rip)
3306 which may be converted to
3307 addr32 call __tls_get_addr
3308 into:
3309 movq %fs:0, %rax
3310 leaq foo@tpoff(%rax), %rax
3311 For 32bit, change
3312 leaq foo@tlsgd(%rip), %rdi
3313 .word 0x6666; rex64; call __tls_get_addr@PLT
3314 or
3315 leaq foo@tlsgd(%rip), %rdi
3316 .byte 0x66; rex64
3317 call *__tls_get_addr@GOTPCREL(%rip)
3318 which may be converted to
3319 addr32 call __tls_get_addr
3320 into:
3321 movl %fs:0, %eax
3322 leaq foo@tpoff(%rax), %rax
3323 For largepic, change:
3324 leaq foo@tlsgd(%rip), %rdi
3325 movabsq $__tls_get_addr@pltoff, %rax
3326 addq %r15, %rax
3327 call *%rax
3328 into:
3329 movq %fs:0, %rax
3330 leaq foo@tpoff(%rax), %rax
3331 nopw 0x0(%rax,%rax,1) */
3332 int largepic = 0;
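/* In the largepic sequence the byte at roff + 5 is the 0xb8 opcode
   of the movabsq (48 b8 <imm64>), which is what the check below
   looks for.  */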
3333 if (ABI_64_P (output_bfd))
3334 {
3335 if (contents[roff + 5] == 0xb8)
3336 {
3337 memcpy (contents + roff - 3,
3338 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3339 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3340 largepic = 1;
3341 }
3342 else
3343 memcpy (contents + roff - 4,
3344 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3345 16);
3346 }
3347 else
3348 memcpy (contents + roff - 3,
3349 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3350 15);
3351 bfd_put_32 (output_bfd,
3352 elf_x86_64_tpoff (info, relocation),
3353 contents + roff + 8 + largepic);
3354 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3355 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3356 rel++;
3357 wrel++;
3358 continue;
3359 }
3360 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3361 {
3362 /* GDesc -> LE transition.
3363 It's originally something like:
3364 leaq x@tlsdesc(%rip), %rax
3365
3366 Change it to:
3367 movl $x@tpoff, %rax. */
3368
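/* Byte-level illustration: "leaq x@tlsdesc(%rip), %rax" is
   48 8d 05 <disp32>; the byte rewrites below turn it into
   48 c7 c0 followed by the 32-bit $x@tpoff immediate, i.e. a mov of
   that immediate into %rax (with REX.R folded into REX.B for
   %r8..%r15 destinations).  */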
3369 unsigned int val, type;
3370
3371 type = bfd_get_8 (input_bfd, contents + roff - 3);
3372 val = bfd_get_8 (input_bfd, contents + roff - 1);
3373 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3374 contents + roff - 3);
3375 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3376 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3377 contents + roff - 1);
3378 bfd_put_32 (output_bfd,
3379 elf_x86_64_tpoff (info, relocation),
3380 contents + roff);
3381 continue;
3382 }
3383 else if (r_type == R_X86_64_TLSDESC_CALL)
3384 {
3385 /* GDesc -> LE transition.
3386 It's originally:
3387 call *(%rax)
3388 Turn it into:
3389 xchg %ax,%ax. */
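/* I.e. the two bytes ff 10 ("call *(%rax)") are overwritten with
   66 90, a two-byte no-op.  */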
3390 bfd_put_8 (output_bfd, 0x66, contents + roff);
3391 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3392 continue;
3393 }
3394 else if (r_type == R_X86_64_GOTTPOFF)
3395 {
3396 /* IE->LE transition:
3397 For 64bit, originally it can be one of:
3398 movq foo@gottpoff(%rip), %reg
3399 addq foo@gottpoff(%rip), %reg
3400 We change it into:
3401 movq $foo, %reg
3402 leaq foo(%reg), %reg
3403 addq $foo, %reg.
3404 For 32bit, originally it can be one of:
3405 movq foo@gottpoff(%rip), %reg
3406 addl foo@gottpoff(%rip), %reg
3407 We change it into:
3408 movq $foo, %reg
3409 leal foo(%reg), %reg
3410 addl $foo, %reg. */
3411
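/* For illustration: "movq foo@gottpoff(%rip), %rax"
   (48 8b 05 <disp32>) becomes "movq $foo, %rax" (48 c7 c0 <imm32>),
   while "addq foo@gottpoff(%rip), %rcx" (48 03 0d <disp32>) becomes
   "leaq foo(%rcx), %rcx" (48 8d 89 <disp32>).  */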
3412 unsigned int val, type, reg;
3413
3414 if (roff >= 3)
3415 val = bfd_get_8 (input_bfd, contents + roff - 3);
3416 else
3417 val = 0;
3418 type = bfd_get_8 (input_bfd, contents + roff - 2);
3419 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3420 reg >>= 3;
3421 if (type == 0x8b)
3422 {
3423 /* movq */
3424 if (val == 0x4c)
3425 bfd_put_8 (output_bfd, 0x49,
3426 contents + roff - 3);
3427 else if (!ABI_64_P (output_bfd) && val == 0x44)
3428 bfd_put_8 (output_bfd, 0x41,
3429 contents + roff - 3);
3430 bfd_put_8 (output_bfd, 0xc7,
3431 contents + roff - 2);
3432 bfd_put_8 (output_bfd, 0xc0 | reg,
3433 contents + roff - 1);
3434 }
3435 else if (reg == 4)
3436 {
3437 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3438 is special */
3439 if (val == 0x4c)
3440 bfd_put_8 (output_bfd, 0x49,
3441 contents + roff - 3);
3442 else if (!ABI_64_P (output_bfd) && val == 0x44)
3443 bfd_put_8 (output_bfd, 0x41,
3444 contents + roff - 3);
3445 bfd_put_8 (output_bfd, 0x81,
3446 contents + roff - 2);
3447 bfd_put_8 (output_bfd, 0xc0 | reg,
3448 contents + roff - 1);
3449 }
3450 else
3451 {
3452 /* addq/addl -> leaq/leal */
3453 if (val == 0x4c)
3454 bfd_put_8 (output_bfd, 0x4d,
3455 contents + roff - 3);
3456 else if (!ABI_64_P (output_bfd) && val == 0x44)
3457 bfd_put_8 (output_bfd, 0x45,
3458 contents + roff - 3);
3459 bfd_put_8 (output_bfd, 0x8d,
3460 contents + roff - 2);
3461 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3462 contents + roff - 1);
3463 }
3464 bfd_put_32 (output_bfd,
3465 elf_x86_64_tpoff (info, relocation),
3466 contents + roff);
3467 continue;
3468 }
3469 else
3470 BFD_ASSERT (FALSE);
3471 }
3472
3473 if (htab->elf.sgot == NULL)
3474 abort ();
3475
3476 if (h != NULL)
3477 {
3478 off = h->got.offset;
3479 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3480 }
3481 else
3482 {
3483 if (local_got_offsets == NULL)
3484 abort ();
3485
3486 off = local_got_offsets[r_symndx];
3487 offplt = local_tlsdesc_gotents[r_symndx];
3488 }
3489
3490 if ((off & 1) != 0)
3491 off &= ~1;
3492 else
3493 {
3494 Elf_Internal_Rela outrel;
3495 int dr_type, indx;
3496 asection *sreloc;
3497
3498 if (htab->elf.srelgot == NULL)
3499 abort ();
3500
3501 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3502
3503 if (GOT_TLS_GDESC_P (tls_type))
3504 {
3505 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3506 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3507 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3508 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3509 + htab->elf.sgotplt->output_offset
3510 + offplt
3511 + htab->sgotplt_jump_table_size);
3512 sreloc = htab->elf.srelplt;
3513 if (indx == 0)
3514 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3515 else
3516 outrel.r_addend = 0;
3517 elf_append_rela (output_bfd, sreloc, &outrel);
3518 }
3519
3520 sreloc = htab->elf.srelgot;
3521
3522 outrel.r_offset = (htab->elf.sgot->output_section->vma
3523 + htab->elf.sgot->output_offset + off);
3524
3525 if (GOT_TLS_GD_P (tls_type))
3526 dr_type = R_X86_64_DTPMOD64;
3527 else if (GOT_TLS_GDESC_P (tls_type))
3528 goto dr_done;
3529 else
3530 dr_type = R_X86_64_TPOFF64;
3531
3532 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3533 outrel.r_addend = 0;
3534 if ((dr_type == R_X86_64_TPOFF64
3535 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3536 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3537 outrel.r_info = htab->r_info (indx, dr_type);
3538
3539 elf_append_rela (output_bfd, sreloc, &outrel);
3540
3541 if (GOT_TLS_GD_P (tls_type))
3542 {
3543 if (indx == 0)
3544 {
3545 BFD_ASSERT (! unresolved_reloc);
3546 bfd_put_64 (output_bfd,
3547 relocation - _bfd_x86_elf_dtpoff_base (info),
3548 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3549 }
3550 else
3551 {
3552 bfd_put_64 (output_bfd, 0,
3553 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3554 outrel.r_info = htab->r_info (indx,
3555 R_X86_64_DTPOFF64);
3556 outrel.r_offset += GOT_ENTRY_SIZE;
3557 elf_append_rela (output_bfd, sreloc,
3558 &outrel);
3559 }
3560 }
3561
3562 dr_done:
3563 if (h != NULL)
3564 h->got.offset |= 1;
3565 else
3566 local_got_offsets[r_symndx] |= 1;
3567 }
3568
3569 if (off >= (bfd_vma) -2
3570 && ! GOT_TLS_GDESC_P (tls_type))
3571 abort ();
3572 if (r_type_tls == r_type)
3573 {
3574 if (r_type == R_X86_64_GOTPC32_TLSDESC
3575 || r_type == R_X86_64_TLSDESC_CALL)
3576 relocation = htab->elf.sgotplt->output_section->vma
3577 + htab->elf.sgotplt->output_offset
3578 + offplt + htab->sgotplt_jump_table_size;
3579 else
3580 relocation = htab->elf.sgot->output_section->vma
3581 + htab->elf.sgot->output_offset + off;
3582 unresolved_reloc = FALSE;
3583 }
3584 else
3585 {
3586 bfd_vma roff = rel->r_offset;
3587
3588 if (r_type == R_X86_64_TLSGD)
3589 {
3590 /* GD->IE transition. For 64bit, change
3591 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3592 .word 0x6666; rex64; call __tls_get_addr@PLT
3593 or
3594 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3595 .byte 0x66; rex64
3596 call *__tls_get_addr@GOTPCREL(%rip)
3597 which may be converted to
3598 addr32 call __tls_get_addr
3599 into:
3600 movq %fs:0, %rax
3601 addq foo@gottpoff(%rip), %rax
3602 For 32bit, change
3603 leaq foo@tlsgd(%rip), %rdi
3604 .word 0x6666; rex64; call __tls_get_addr@PLT
3605 or
3606 leaq foo@tlsgd(%rip), %rdi
3607 .byte 0x66; rex64;
3608 call *__tls_get_addr@GOTPCREL(%rip)
3609 which may be converted to
3610 addr32 call __tls_get_addr
3611 into:
3612 movl %fs:0, %eax
3613 addq foo@gottpoff(%rip), %rax
3614 For largepic, change:
3615 leaq foo@tlsgd(%rip), %rdi
3616 movabsq $__tls_get_addr@pltoff, %rax
3617 addq %r15, %rax
3618 call *%rax
3619 into:
3620 movq %fs:0, %rax
3621 addq foo@gottpoff(%rax), %rax
3622 nopw 0x0(%rax,%rax,1) */
3623 int largepic = 0;
3624 if (ABI_64_P (output_bfd))
3625 {
3626 if (contents[roff + 5] == 0xb8)
3627 {
3628 memcpy (contents + roff - 3,
3629 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3630 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3631 largepic = 1;
3632 }
3633 else
3634 memcpy (contents + roff - 4,
3635 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3636 16);
3637 }
3638 else
3639 memcpy (contents + roff - 3,
3640 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3641 15);
3642
3643 relocation = (htab->elf.sgot->output_section->vma
3644 + htab->elf.sgot->output_offset + off
3645 - roff
3646 - largepic
3647 - input_section->output_section->vma
3648 - input_section->output_offset
3649 - 12);
3650 bfd_put_32 (output_bfd, relocation,
3651 contents + roff + 8 + largepic);
3652 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3653 rel++;
3654 wrel++;
3655 continue;
3656 }
3657 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3658 {
3659 /* GDesc -> IE transition.
3660 It's originally something like:
3661 leaq x@tlsdesc(%rip), %rax
3662
3663 Change it to:
3664 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3665
3666 /* Now modify the instruction as appropriate. To
3667 turn a leaq into a movq in the form we use it, it
3668 suffices to change the second byte from 0x8d to
3669 0x8b. */
3670 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3671
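	      /* The stored value is RIP-relative: it is measured from the
		 end of the 4-byte displacement field at roff (hence the
		 "- 4") to the GOT entry used for the IE-style access.  */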
3672 bfd_put_32 (output_bfd,
3673 htab->elf.sgot->output_section->vma
3674 + htab->elf.sgot->output_offset + off
3675 - rel->r_offset
3676 - input_section->output_section->vma
3677 - input_section->output_offset
3678 - 4,
3679 contents + roff);
3680 continue;
3681 }
3682 else if (r_type == R_X86_64_TLSDESC_CALL)
3683 {
3684 /* GDesc -> IE transition.
3685 It's originally:
3686 call *(%rax)
3687
3688 Change it to:
3689 xchg %ax, %ax. */
3690
3691 bfd_put_8 (output_bfd, 0x66, contents + roff);
3692 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3693 continue;
3694 }
3695 else
3696 BFD_ASSERT (FALSE);
3697 }
3698 break;
3699
3700 case R_X86_64_TLSLD:
3701 if (! elf_x86_64_tls_transition (info, input_bfd,
3702 input_section, contents,
3703 symtab_hdr, sym_hashes,
3704 &r_type, GOT_UNKNOWN, rel,
3705 relend, h, r_symndx, TRUE))
3706 return FALSE;
3707
3708 if (r_type != R_X86_64_TLSLD)
3709 {
3710 /* LD->LE transition:
3711 leaq foo@tlsld(%rip), %rdi
3712 call __tls_get_addr@PLT
3713 For 64bit, we change it into:
3714 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3715 For 32bit, we change it into:
3716 nopl 0x0(%rax); movl %fs:0, %eax
3717 Or
3718 leaq foo@tlsld(%rip), %rdi;
3719 call *__tls_get_addr@GOTPCREL(%rip)
3720 which may be converted to
3721 addr32 call __tls_get_addr
3722 For 64bit, we change it into:
3723 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3724 For 32bit, we change it into:
3725 nopw 0x0(%rax); movl %fs:0, %eax
3726 For largepic, change:
3727 		 leaq foo@tlsld(%rip), %rdi
3728 movabsq $__tls_get_addr@pltoff, %rax
3729 addq %rbx, %rax
3730 call *%rax
3731 into
3732 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3733 		 movq %fs:0, %rax */
3734
3735 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
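	      /* The code below picks the replacement by looking at the bytes
		 after the relocation: 0xb8 at offset + 5 is the second byte
		 of "movabsq $...,%rax" (the large-model call sequence),
		 while 0xff or 0x67 at offset + 4 marks an indirect
		 "call *__tls_get_addr@GOTPCREL(%rip)" (possibly with an
		 addr32 prefix), which is one byte longer than the direct
		 call and therefore needs one more byte of padding.  */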
3736 if (ABI_64_P (output_bfd))
3737 {
3738 if (contents[rel->r_offset + 5] == 0xb8)
3739 memcpy (contents + rel->r_offset - 3,
3740 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3741 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3742 else if (contents[rel->r_offset + 4] == 0xff
3743 || contents[rel->r_offset + 4] == 0x67)
3744 memcpy (contents + rel->r_offset - 3,
3745 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3746 13);
3747 else
3748 memcpy (contents + rel->r_offset - 3,
3749 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3750 }
3751 else
3752 {
3753 if (contents[rel->r_offset + 4] == 0xff)
3754 memcpy (contents + rel->r_offset - 3,
3755 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3756 13);
3757 else
3758 memcpy (contents + rel->r_offset - 3,
3759 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3760 }
3761 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3762 and R_X86_64_PLTOFF64. */
3763 rel++;
3764 wrel++;
3765 continue;
3766 }
3767
3768 if (htab->elf.sgot == NULL)
3769 abort ();
3770
3771 off = htab->tls_ld_or_ldm_got.offset;
3772 if (off & 1)
3773 off &= ~1;
3774 else
3775 {
3776 Elf_Internal_Rela outrel;
3777
3778 if (htab->elf.srelgot == NULL)
3779 abort ();
3780
3781 outrel.r_offset = (htab->elf.sgot->output_section->vma
3782 + htab->elf.sgot->output_offset + off);
3783
3784 bfd_put_64 (output_bfd, 0,
3785 htab->elf.sgot->contents + off);
3786 bfd_put_64 (output_bfd, 0,
3787 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3788 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3789 outrel.r_addend = 0;
3790 elf_append_rela (output_bfd, htab->elf.srelgot,
3791 &outrel);
3792 htab->tls_ld_or_ldm_got.offset |= 1;
3793 }
3794 relocation = htab->elf.sgot->output_section->vma
3795 + htab->elf.sgot->output_offset + off;
3796 unresolved_reloc = FALSE;
3797 break;
3798
3799 case R_X86_64_DTPOFF32:
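	  /* In an executable, a DTPOFF32 left in a code section is
	     presumably the result of a GD/LD->LE transition, where the base
	     register now holds the thread pointer, so the value must be a
	     TP-relative offset; in non-code sections (e.g. debug info) the
	     DTP-relative value is kept.  */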
3800 if (!bfd_link_executable (info)
3801 || (input_section->flags & SEC_CODE) == 0)
3802 relocation -= _bfd_x86_elf_dtpoff_base (info);
3803 else
3804 relocation = elf_x86_64_tpoff (info, relocation);
3805 break;
3806
3807 case R_X86_64_TPOFF32:
3808 case R_X86_64_TPOFF64:
3809 BFD_ASSERT (bfd_link_executable (info));
3810 relocation = elf_x86_64_tpoff (info, relocation);
3811 break;
3812
3813 case R_X86_64_DTPOFF64:
3814 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3815 relocation -= _bfd_x86_elf_dtpoff_base (info);
3816 break;
3817
3818 default:
3819 break;
3820 }
3821
3822 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3823 because such sections are not SEC_ALLOC and thus ld.so will
3824 not process them. */
3825 if (unresolved_reloc
3826 && !((input_section->flags & SEC_DEBUGGING) != 0
3827 && h->def_dynamic)
3828 && _bfd_elf_section_offset (output_bfd, info, input_section,
3829 rel->r_offset) != (bfd_vma) -1)
3830 {
3831 switch (r_type)
3832 {
3833 case R_X86_64_32S:
3834 sec = h->root.u.def.section;
3835 if ((info->nocopyreloc
3836 || (eh->def_protected
3837 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3838 && !(h->root.u.def.section->flags & SEC_CODE))
3839 return elf_x86_64_need_pic (info, input_bfd, input_section,
3840 h, NULL, NULL, howto);
3841 /* Fall through. */
3842
3843 default:
3844 _bfd_error_handler
3845 /* xgettext:c-format */
3846 (_("%pB(%pA+%#" PRIx64 "): "
3847 "unresolvable %s relocation against symbol `%s'"),
3848 input_bfd,
3849 input_section,
3850 (uint64_t) rel->r_offset,
3851 howto->name,
3852 h->root.root.string);
3853 return FALSE;
3854 }
3855 }
3856
3857 do_relocation:
3858 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3859 contents, rel->r_offset,
3860 relocation, rel->r_addend);
3861
3862 check_relocation_error:
3863 if (r != bfd_reloc_ok)
3864 {
3865 const char *name;
3866
3867 if (h != NULL)
3868 name = h->root.root.string;
3869 else
3870 {
3871 name = bfd_elf_string_from_elf_section (input_bfd,
3872 symtab_hdr->sh_link,
3873 sym->st_name);
3874 if (name == NULL)
3875 return FALSE;
3876 if (*name == '\0')
3877 name = bfd_section_name (input_bfd, sec);
3878 }
3879
3880 if (r == bfd_reloc_overflow)
3881 {
3882 if (converted_reloc)
3883 {
3884 info->callbacks->einfo
3885 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3886 return FALSE;
3887 }
3888 (*info->callbacks->reloc_overflow)
3889 (info, (h ? &h->root : NULL), name, howto->name,
3890 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3891 }
3892 else
3893 {
3894 _bfd_error_handler
3895 /* xgettext:c-format */
3896 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3897 input_bfd, input_section,
3898 (uint64_t) rel->r_offset, name, (int) r);
3899 return FALSE;
3900 }
3901 }
3902
3903 if (wrel != rel)
3904 *wrel = *rel;
3905 }
3906
3907 if (wrel != rel)
3908 {
3909 Elf_Internal_Shdr *rel_hdr;
3910 size_t deleted = rel - wrel;
3911
3912 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3913 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3914 if (rel_hdr->sh_size == 0)
3915 {
3916 /* It is too late to remove an empty reloc section. Leave
3917 one NONE reloc.
3918 ??? What is wrong with an empty section??? */
3919 rel_hdr->sh_size = rel_hdr->sh_entsize;
3920 deleted -= 1;
3921 }
3922 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3923 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3924 input_section->reloc_count -= deleted;
3925 }
3926
3927 return TRUE;
3928 }
3929
3930 /* Finish up dynamic symbol handling. We set the contents of various
3931 dynamic sections here. */
3932
3933 static bfd_boolean
3934 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3935 struct bfd_link_info *info,
3936 struct elf_link_hash_entry *h,
3937 Elf_Internal_Sym *sym)
3938 {
3939 struct elf_x86_link_hash_table *htab;
3940 bfd_boolean use_plt_second;
3941 struct elf_x86_link_hash_entry *eh;
3942 bfd_boolean local_undefweak;
3943
3944 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3945 if (htab == NULL)
3946 return FALSE;
3947
3948   /* Use the second PLT section only if there is a .plt section.  */
3949 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3950
3951 eh = (struct elf_x86_link_hash_entry *) h;
3952 if (eh->no_finish_dynamic_symbol)
3953 abort ();
3954
3955 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3956 resolved undefined weak symbols in executable so that their
3957 references have value 0 at run-time. */
3958 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3959
3960 if (h->plt.offset != (bfd_vma) -1)
3961 {
3962 bfd_vma plt_index;
3963 bfd_vma got_offset, plt_offset;
3964 Elf_Internal_Rela rela;
3965 bfd_byte *loc;
3966 asection *plt, *gotplt, *relplt, *resolved_plt;
3967 const struct elf_backend_data *bed;
3968 bfd_vma plt_got_pcrel_offset;
3969
3970 /* When building a static executable, use .iplt, .igot.plt and
3971 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3972 if (htab->elf.splt != NULL)
3973 {
3974 plt = htab->elf.splt;
3975 gotplt = htab->elf.sgotplt;
3976 relplt = htab->elf.srelplt;
3977 }
3978 else
3979 {
3980 plt = htab->elf.iplt;
3981 gotplt = htab->elf.igotplt;
3982 relplt = htab->elf.irelplt;
3983 }
3984
3985 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3986
3987 /* Get the index in the procedure linkage table which
3988 corresponds to this symbol. This is the index of this symbol
3989 in all the symbols for which we are making plt entries. The
3990 first entry in the procedure linkage table is reserved.
3991
3992 Get the offset into the .got table of the entry that
3993 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3994 bytes. The first three are reserved for the dynamic linker.
3995
3996 For static executables, we don't reserve anything. */
3997
3998 if (plt == htab->elf.splt)
3999 {
4000 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4001 - htab->plt.has_plt0);
4002 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4003 }
4004 else
4005 {
4006 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4007 got_offset = got_offset * GOT_ENTRY_SIZE;
4008 }
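      /* For example, with the usual lazy .plt (PLT0 present) the first real
	 PLT entry sits at offset plt_entry_size, giving an index of 0 and a
	 GOT offset of (0 + 3) * GOT_ENTRY_SIZE, i.e. the slot right after
	 the three reserved ones.  */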
4009
4010 /* Fill in the entry in the procedure linkage table. */
4011 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4012 htab->plt.plt_entry_size);
4013 if (use_plt_second)
4014 {
4015 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4016 htab->non_lazy_plt->plt_entry,
4017 htab->non_lazy_plt->plt_entry_size);
4018
4019 resolved_plt = htab->plt_second;
4020 plt_offset = eh->plt_second.offset;
4021 }
4022 else
4023 {
4024 resolved_plt = plt;
4025 plt_offset = h->plt.offset;
4026 }
4027
4028 /* Insert the relocation positions of the plt section. */
4029
4030       /* Store the PC-relative offset to the GOT entry for the instruction
4031 	 that refers to it, subtracting the size of that instruction.  */
4032 plt_got_pcrel_offset = (gotplt->output_section->vma
4033 + gotplt->output_offset
4034 + got_offset
4035 - resolved_plt->output_section->vma
4036 - resolved_plt->output_offset
4037 - plt_offset
4038 - htab->plt.plt_got_insn_size);
4039
4040 /* Check PC-relative offset overflow in PLT entry. */
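      /* The unsigned test below is the usual idiom for "does not fit in a
	 signed 32-bit displacement": adding 0x80000000 maps the valid range
	 [-0x80000000, 0x7fffffff] onto [0, 0xffffffff].  */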
4041 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4042 /* xgettext:c-format */
4043 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4044 output_bfd, h->root.root.string);
4045
4046 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4047 (resolved_plt->contents + plt_offset
4048 + htab->plt.plt_got_offset));
4049
4050       /* Fill in the entry in the global offset table; initially this
4051 	 points to the second part of the PLT entry.  Leave the entry
4052 	 as zero for an undefined weak symbol in PIE, for which no PLT
4053 	 relocation is generated.  */
4054 if (!local_undefweak)
4055 {
4056 if (htab->plt.has_plt0)
4057 bfd_put_64 (output_bfd, (plt->output_section->vma
4058 + plt->output_offset
4059 + h->plt.offset
4060 + htab->lazy_plt->plt_lazy_offset),
4061 gotplt->contents + got_offset);
4062
4063 /* Fill in the entry in the .rela.plt section. */
4064 rela.r_offset = (gotplt->output_section->vma
4065 + gotplt->output_offset
4066 + got_offset);
4067 if (PLT_LOCAL_IFUNC_P (info, h))
4068 {
4069 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4070 h->root.root.string,
4071 h->root.u.def.section->owner);
4072
4073 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4074 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4075 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4076 rela.r_addend = (h->root.u.def.value
4077 + h->root.u.def.section->output_section->vma
4078 + h->root.u.def.section->output_offset);
4079 /* R_X86_64_IRELATIVE comes last. */
4080 plt_index = htab->next_irelative_index--;
4081 }
4082 else
4083 {
4084 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4085 rela.r_addend = 0;
4086 plt_index = htab->next_jump_slot_index++;
4087 }
4088
4089 	  /* Don't fill in the second and third slots of the PLT entry for
4090 	     static executables, nor when there is no PLT0.  */
4091 if (plt == htab->elf.splt && htab->plt.has_plt0)
4092 {
4093 bfd_vma plt0_offset
4094 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4095
4096 /* Put relocation index. */
4097 bfd_put_32 (output_bfd, plt_index,
4098 (plt->contents + h->plt.offset
4099 + htab->lazy_plt->plt_reloc_offset));
4100
4101 /* Put offset for jmp .PLT0 and check for overflow. We don't
4102 check relocation index for overflow since branch displacement
4103 will overflow first. */
4104 if (plt0_offset > 0x80000000)
4105 /* xgettext:c-format */
4106 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4107 output_bfd, h->root.root.string);
4108 bfd_put_32 (output_bfd, - plt0_offset,
4109 (plt->contents + h->plt.offset
4110 + htab->lazy_plt->plt_plt_offset));
4111 }
4112
4113 bed = get_elf_backend_data (output_bfd);
4114 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4115 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4116 }
4117 }
4118 else if (eh->plt_got.offset != (bfd_vma) -1)
4119 {
4120 bfd_vma got_offset, plt_offset;
4121 asection *plt, *got;
4122 bfd_boolean got_after_plt;
4123 int32_t got_pcrel_offset;
4124
4125 /* Set the entry in the GOT procedure linkage table. */
4126 plt = htab->plt_got;
4127 got = htab->elf.sgot;
4128 got_offset = h->got.offset;
4129
4130 if (got_offset == (bfd_vma) -1
4131 || (h->type == STT_GNU_IFUNC && h->def_regular)
4132 || plt == NULL
4133 || got == NULL)
4134 abort ();
4135
4136       /* Use the non-lazy PLT entry template for the GOT PLT since they
4137 	 are identical.  */
4138 /* Fill in the entry in the GOT procedure linkage table. */
4139 plt_offset = eh->plt_got.offset;
4140 memcpy (plt->contents + plt_offset,
4141 htab->non_lazy_plt->plt_entry,
4142 htab->non_lazy_plt->plt_entry_size);
4143
4144       /* Store the PC-relative offset to the GOT entry for the instruction
4145 	 that refers to it, subtracting the size of that instruction.  */
4146 got_pcrel_offset = (got->output_section->vma
4147 + got->output_offset
4148 + got_offset
4149 - plt->output_section->vma
4150 - plt->output_offset
4151 - plt_offset
4152 - htab->non_lazy_plt->plt_got_insn_size);
4153
4154 /* Check PC-relative offset overflow in GOT PLT entry. */
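      /* got_pcrel_offset is a 32-bit signed value, so a true displacement
	 that does not fit wraps around and flips sign; comparing the sign
	 against the relative placement of .got and the PLT catches that.  */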
4155 got_after_plt = got->output_section->vma > plt->output_section->vma;
4156 if ((got_after_plt && got_pcrel_offset < 0)
4157 || (!got_after_plt && got_pcrel_offset > 0))
4158 /* xgettext:c-format */
4159 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4160 output_bfd, h->root.root.string);
4161
4162 bfd_put_32 (output_bfd, got_pcrel_offset,
4163 (plt->contents + plt_offset
4164 + htab->non_lazy_plt->plt_got_offset));
4165 }
4166
4167 if (!local_undefweak
4168 && !h->def_regular
4169 && (h->plt.offset != (bfd_vma) -1
4170 || eh->plt_got.offset != (bfd_vma) -1))
4171 {
4172 /* Mark the symbol as undefined, rather than as defined in
4173 the .plt section. Leave the value if there were any
4174 relocations where pointer equality matters (this is a clue
4175 for the dynamic linker, to make function pointer
4176 comparisons work between an application and shared
4177 library), otherwise set it to zero. If a function is only
4178 called from a binary, there is no need to slow down
4179 shared libraries because of that. */
4180 sym->st_shndx = SHN_UNDEF;
4181 if (!h->pointer_equality_needed)
4182 sym->st_value = 0;
4183 }
4184
4185 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4186
4187 /* Don't generate dynamic GOT relocation against undefined weak
4188 symbol in executable. */
4189 if (h->got.offset != (bfd_vma) -1
4190 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4191 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4192 && !local_undefweak)
4193 {
4194 Elf_Internal_Rela rela;
4195 asection *relgot = htab->elf.srelgot;
4196
4197 /* This symbol has an entry in the global offset table. Set it
4198 up. */
4199 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4200 abort ();
4201
4202 rela.r_offset = (htab->elf.sgot->output_section->vma
4203 + htab->elf.sgot->output_offset
4204 + (h->got.offset &~ (bfd_vma) 1));
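      /* The low bit of got.offset is a flag set once the GOT entry has been
	 processed; mask it off to recover the real offset.  */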
4205
4206 /* If this is a static link, or it is a -Bsymbolic link and the
4207 symbol is defined locally or was forced to be local because
4208 of a version file, we just want to emit a RELATIVE reloc.
4209 The entry in the global offset table will already have been
4210 initialized in the relocate_section function. */
4211 if (h->def_regular
4212 && h->type == STT_GNU_IFUNC)
4213 {
4214 if (h->plt.offset == (bfd_vma) -1)
4215 {
4216 /* STT_GNU_IFUNC is referenced without PLT. */
4217 if (htab->elf.splt == NULL)
4218 {
4219 		  /* Use the .rel[a].iplt section to store .got relocations
4220 		     in a static executable.  */
4221 relgot = htab->elf.irelplt;
4222 }
4223 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4224 {
4225 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4226 h->root.root.string,
4227 h->root.u.def.section->owner);
4228
4229 rela.r_info = htab->r_info (0,
4230 R_X86_64_IRELATIVE);
4231 rela.r_addend = (h->root.u.def.value
4232 + h->root.u.def.section->output_section->vma
4233 + h->root.u.def.section->output_offset);
4234 }
4235 else
4236 goto do_glob_dat;
4237 }
4238 else if (bfd_link_pic (info))
4239 {
4240 /* Generate R_X86_64_GLOB_DAT. */
4241 goto do_glob_dat;
4242 }
4243 else
4244 {
4245 asection *plt;
4246 bfd_vma plt_offset;
4247
4248 if (!h->pointer_equality_needed)
4249 abort ();
4250
4251 	      /* For a non-shared object, we can't use .got.plt, which
4252 		 contains the real function address, if we need pointer
4253 		 equality.  We load the GOT entry with the PLT entry.  */
4254 if (htab->plt_second != NULL)
4255 {
4256 plt = htab->plt_second;
4257 plt_offset = eh->plt_second.offset;
4258 }
4259 else
4260 {
4261 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4262 plt_offset = h->plt.offset;
4263 }
4264 bfd_put_64 (output_bfd, (plt->output_section->vma
4265 + plt->output_offset
4266 + plt_offset),
4267 htab->elf.sgot->contents + h->got.offset);
4268 return TRUE;
4269 }
4270 }
4271 else if (bfd_link_pic (info)
4272 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4273 {
4274 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4275 return FALSE;
4276 BFD_ASSERT((h->got.offset & 1) != 0);
4277 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4278 rela.r_addend = (h->root.u.def.value
4279 + h->root.u.def.section->output_section->vma
4280 + h->root.u.def.section->output_offset);
4281 }
4282 else
4283 {
4284 BFD_ASSERT((h->got.offset & 1) == 0);
4285 do_glob_dat:
4286 bfd_put_64 (output_bfd, (bfd_vma) 0,
4287 htab->elf.sgot->contents + h->got.offset);
4288 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4289 rela.r_addend = 0;
4290 }
4291
4292 elf_append_rela (output_bfd, relgot, &rela);
4293 }
4294
4295 if (h->needs_copy)
4296 {
4297 Elf_Internal_Rela rela;
4298 asection *s;
4299
4300 /* This symbol needs a copy reloc. Set it up. */
4301 VERIFY_COPY_RELOC (h, htab)
4302
4303 rela.r_offset = (h->root.u.def.value
4304 + h->root.u.def.section->output_section->vma
4305 + h->root.u.def.section->output_offset);
4306 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4307 rela.r_addend = 0;
4308 if (h->root.u.def.section == htab->elf.sdynrelro)
4309 s = htab->elf.sreldynrelro;
4310 else
4311 s = htab->elf.srelbss;
4312 elf_append_rela (output_bfd, s, &rela);
4313 }
4314
4315 return TRUE;
4316 }
4317
4318 /* Finish up local dynamic symbol handling. We set the contents of
4319 various dynamic sections here. */
4320
4321 static bfd_boolean
4322 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4323 {
4324 struct elf_link_hash_entry *h
4325 = (struct elf_link_hash_entry *) *slot;
4326 struct bfd_link_info *info
4327 = (struct bfd_link_info *) inf;
4328
4329 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4330 info, h, NULL);
4331 }
4332
4333 /* Finish up undefined weak symbol handling in PIE.  Fill in its PLT
4334    entry here since an undefined weak symbol may not be dynamic, in which
4335    case elf_x86_64_finish_dynamic_symbol may not be called for it.  */
4336
4337 static bfd_boolean
4338 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4339 void *inf)
4340 {
4341 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4342 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4343
4344 if (h->root.type != bfd_link_hash_undefweak
4345 || h->dynindx != -1)
4346 return TRUE;
4347
4348 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4349 info, h, NULL);
4350 }
4351
4352 /* Used to decide how to sort relocs in an optimal manner for the
4353 dynamic linker, before writing them out. */
4354
4355 static enum elf_reloc_type_class
4356 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4357 const asection *rel_sec ATTRIBUTE_UNUSED,
4358 const Elf_Internal_Rela *rela)
4359 {
4360 bfd *abfd = info->output_bfd;
4361 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4362 struct elf_x86_link_hash_table *htab
4363 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4364
4365 if (htab->elf.dynsym != NULL
4366 && htab->elf.dynsym->contents != NULL)
4367 {
4368 /* Check relocation against STT_GNU_IFUNC symbol if there are
4369 dynamic symbols. */
4370 unsigned long r_symndx = htab->r_sym (rela->r_info);
4371 if (r_symndx != STN_UNDEF)
4372 {
4373 Elf_Internal_Sym sym;
4374 if (!bed->s->swap_symbol_in (abfd,
4375 (htab->elf.dynsym->contents
4376 + r_symndx * bed->s->sizeof_sym),
4377 0, &sym))
4378 abort ();
4379
4380 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4381 return reloc_class_ifunc;
4382 }
4383 }
4384
4385 switch ((int) ELF32_R_TYPE (rela->r_info))
4386 {
4387 case R_X86_64_IRELATIVE:
4388 return reloc_class_ifunc;
4389 case R_X86_64_RELATIVE:
4390 case R_X86_64_RELATIVE64:
4391 return reloc_class_relative;
4392 case R_X86_64_JUMP_SLOT:
4393 return reloc_class_plt;
4394 case R_X86_64_COPY:
4395 return reloc_class_copy;
4396 default:
4397 return reloc_class_normal;
4398 }
4399 }
4400
4401 /* Finish up the dynamic sections. */
4402
4403 static bfd_boolean
4404 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4405 struct bfd_link_info *info)
4406 {
4407 struct elf_x86_link_hash_table *htab;
4408
4409 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4410 if (htab == NULL)
4411 return FALSE;
4412
4413 if (! htab->elf.dynamic_sections_created)
4414 return TRUE;
4415
4416 if (htab->elf.splt && htab->elf.splt->size > 0)
4417 {
4418 elf_section_data (htab->elf.splt->output_section)
4419 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4420
4421 if (htab->plt.has_plt0)
4422 {
4423 /* Fill in the special first entry in the procedure linkage
4424 table. */
4425 memcpy (htab->elf.splt->contents,
4426 htab->lazy_plt->plt0_entry,
4427 htab->lazy_plt->plt0_entry_size);
4428 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
4429 	     uses 6 bytes, subtract this value.  */
4430 bfd_put_32 (output_bfd,
4431 (htab->elf.sgotplt->output_section->vma
4432 + htab->elf.sgotplt->output_offset
4433 + 8
4434 - htab->elf.splt->output_section->vma
4435 - htab->elf.splt->output_offset
4436 - 6),
4437 (htab->elf.splt->contents
4438 + htab->lazy_plt->plt0_got1_offset));
4439 /* Add offset for the PC-relative instruction accessing
4440 GOT+16, subtracting the offset to the end of that
4441 instruction. */
4442 bfd_put_32 (output_bfd,
4443 (htab->elf.sgotplt->output_section->vma
4444 + htab->elf.sgotplt->output_offset
4445 + 16
4446 - htab->elf.splt->output_section->vma
4447 - htab->elf.splt->output_offset
4448 - htab->lazy_plt->plt0_got2_insn_end),
4449 (htab->elf.splt->contents
4450 + htab->lazy_plt->plt0_got2_offset));
4451 }
4452
4453 if (htab->tlsdesc_plt)
4454 {
4455 bfd_put_64 (output_bfd, (bfd_vma) 0,
4456 htab->elf.sgot->contents + htab->tlsdesc_got);
4457
4458 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4459 htab->lazy_plt->plt_tlsdesc_entry,
4460 htab->lazy_plt->plt_tlsdesc_entry_size);
4461
4462 	  /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4463 	     bytes and the instruction uses 6 bytes, subtract these
4464 	     values.  */
4465 bfd_put_32 (output_bfd,
4466 (htab->elf.sgotplt->output_section->vma
4467 + htab->elf.sgotplt->output_offset
4468 + 8
4469 - htab->elf.splt->output_section->vma
4470 - htab->elf.splt->output_offset
4471 - htab->tlsdesc_plt
4472 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4473 (htab->elf.splt->contents
4474 + htab->tlsdesc_plt
4475 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4476 /* Add offset for indirect branch via GOT+TDG, where TDG
4477 stands for htab->tlsdesc_got, subtracting the offset
4478 to the end of that instruction. */
4479 bfd_put_32 (output_bfd,
4480 (htab->elf.sgot->output_section->vma
4481 + htab->elf.sgot->output_offset
4482 + htab->tlsdesc_got
4483 - htab->elf.splt->output_section->vma
4484 - htab->elf.splt->output_offset
4485 - htab->tlsdesc_plt
4486 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4487 (htab->elf.splt->contents
4488 + htab->tlsdesc_plt
4489 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4490 }
4491 }
4492
4493 /* Fill PLT entries for undefined weak symbols in PIE. */
4494 if (bfd_link_pie (info))
4495 bfd_hash_traverse (&info->hash->table,
4496 elf_x86_64_pie_finish_undefweak_symbol,
4497 info);
4498
4499 return TRUE;
4500 }
4501
4502 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4503 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4504 It has to be done before elf_link_sort_relocs is called so that
4505 dynamic relocations are properly sorted. */
4506
4507 static bfd_boolean
4508 elf_x86_64_output_arch_local_syms
4509 (bfd *output_bfd ATTRIBUTE_UNUSED,
4510 struct bfd_link_info *info,
4511 void *flaginfo ATTRIBUTE_UNUSED,
4512 int (*func) (void *, const char *,
4513 Elf_Internal_Sym *,
4514 asection *,
4515 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4516 {
4517 struct elf_x86_link_hash_table *htab
4518 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4519 if (htab == NULL)
4520 return FALSE;
4521
4522 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4523 htab_traverse (htab->loc_hash_table,
4524 elf_x86_64_finish_local_dynamic_symbol,
4525 info);
4526
4527 return TRUE;
4528 }
4529
4530 /* Forward declaration. */
4531 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4532
4533 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4534 dynamic relocations. */
4535
4536 static long
4537 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4538 long symcount ATTRIBUTE_UNUSED,
4539 asymbol **syms ATTRIBUTE_UNUSED,
4540 long dynsymcount,
4541 asymbol **dynsyms,
4542 asymbol **ret)
4543 {
4544 long count, i, n;
4545 int j;
4546 bfd_byte *plt_contents;
4547 long relsize;
4548 const struct elf_x86_lazy_plt_layout *lazy_plt;
4549 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4550 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4551 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4552 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4553 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4554 asection *plt;
4555 enum elf_x86_plt_type plt_type;
4556 struct elf_x86_plt plts[] =
4557 {
4558 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4559 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4560 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4561 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4562 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4563 };
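  /* Each row above names a candidate PLT-like section; the loop below loads
     its contents, works out which PLT layout it uses and records the
     per-entry parameters, leaving rows for missing or unrecognized sections
     unfilled.  The NULL name terminates the list.  */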
4564
4565 *ret = NULL;
4566
4567 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4568 return 0;
4569
4570 if (dynsymcount <= 0)
4571 return 0;
4572
4573 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4574 if (relsize <= 0)
4575 return -1;
4576
4577 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4578 {
4579 lazy_plt = &elf_x86_64_lazy_plt;
4580 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4581 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4582 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4583 if (ABI_64_P (abfd))
4584 {
4585 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4586 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4587 }
4588 else
4589 {
4590 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4591 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4592 }
4593 }
4594 else
4595 {
4596 lazy_plt = &elf_x86_64_nacl_plt;
4597 non_lazy_plt = NULL;
4598 lazy_bnd_plt = NULL;
4599 non_lazy_bnd_plt = NULL;
4600 lazy_ibt_plt = NULL;
4601 non_lazy_ibt_plt = NULL;
4602 }
4603
4604 count = 0;
4605 for (j = 0; plts[j].name != NULL; j++)
4606 {
4607 plt = bfd_get_section_by_name (abfd, plts[j].name);
4608 if (plt == NULL || plt->size == 0)
4609 continue;
4610
4611 /* Get the PLT section contents. */
4612 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4613 if (plt_contents == NULL)
4614 break;
4615 if (!bfd_get_section_contents (abfd, (asection *) plt,
4616 plt_contents, 0, plt->size))
4617 {
4618 free (plt_contents);
4619 break;
4620 }
4621
4622 /* Check what kind of PLT it is. */
4623 plt_type = plt_unknown;
4624 if (plts[j].type == plt_unknown
4625 && (plt->size >= (lazy_plt->plt_entry_size
4626 + lazy_plt->plt_entry_size)))
4627 {
4628 /* Match lazy PLT first. Need to check the first two
4629 instructions. */
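	  /* Only the opcode bytes are compared; the displacement fields,
	     which depend on the final section layout, are skipped (hence the
	     memcmp lengths of plt0_got1_offset bytes and a few bytes
	     starting at offset 6).  */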
4630 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4631 lazy_plt->plt0_got1_offset) == 0)
4632 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4633 2) == 0))
4634 plt_type = plt_lazy;
4635 else if (lazy_bnd_plt != NULL
4636 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4637 lazy_bnd_plt->plt0_got1_offset) == 0)
4638 && (memcmp (plt_contents + 6,
4639 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4640 {
4641 plt_type = plt_lazy | plt_second;
4642 	      /* The first entry in the lazy IBT PLT is the same as in the
4643 		 lazy BND PLT.  */
4644 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4645 lazy_ibt_plt->plt_entry,
4646 lazy_ibt_plt->plt_got_offset) == 0))
4647 lazy_plt = lazy_ibt_plt;
4648 else
4649 lazy_plt = lazy_bnd_plt;
4650 }
4651 }
4652
4653 if (non_lazy_plt != NULL
4654 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4655 && plt->size >= non_lazy_plt->plt_entry_size)
4656 {
4657 /* Match non-lazy PLT. */
4658 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4659 non_lazy_plt->plt_got_offset) == 0)
4660 plt_type = plt_non_lazy;
4661 }
4662
4663 if (plt_type == plt_unknown || plt_type == plt_second)
4664 {
4665 if (non_lazy_bnd_plt != NULL
4666 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4667 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4668 non_lazy_bnd_plt->plt_got_offset) == 0))
4669 {
4670 /* Match BND PLT. */
4671 plt_type = plt_second;
4672 non_lazy_plt = non_lazy_bnd_plt;
4673 }
4674 else if (non_lazy_ibt_plt != NULL
4675 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4676 && (memcmp (plt_contents,
4677 non_lazy_ibt_plt->plt_entry,
4678 non_lazy_ibt_plt->plt_got_offset) == 0))
4679 {
4680 /* Match IBT PLT. */
4681 plt_type = plt_second;
4682 non_lazy_plt = non_lazy_ibt_plt;
4683 }
4684 }
4685
4686 if (plt_type == plt_unknown)
4687 {
4688 free (plt_contents);
4689 continue;
4690 }
4691
4692 plts[j].sec = plt;
4693 plts[j].type = plt_type;
4694
4695 if ((plt_type & plt_lazy))
4696 {
4697 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4698 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4699 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4700 /* Skip PLT0 in lazy PLT. */
4701 i = 1;
4702 }
4703 else
4704 {
4705 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4706 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4707 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4708 i = 0;
4709 }
4710
4711 /* Skip lazy PLT when the second PLT is used. */
4712 if (plt_type == (plt_lazy | plt_second))
4713 plts[j].count = 0;
4714 else
4715 {
4716 n = plt->size / plts[j].plt_entry_size;
4717 plts[j].count = n;
4718 count += n - i;
4719 }
4720
4721 plts[j].contents = plt_contents;
4722 }
4723
4724 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4725 (bfd_vma) 0, plts, dynsyms,
4726 ret);
4727 }
4728
4729 /* Handle an x86-64 specific section when reading an object file. This
4730 is called when elfcode.h finds a section with an unknown type. */
4731
4732 static bfd_boolean
4733 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4734 const char *name, int shindex)
4735 {
4736 if (hdr->sh_type != SHT_X86_64_UNWIND)
4737 return FALSE;
4738
4739 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4740 return FALSE;
4741
4742 return TRUE;
4743 }
4744
4745 /* Hook called by the linker routine which adds symbols from an object
4746 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4747 of .bss. */
4748
4749 static bfd_boolean
4750 elf_x86_64_add_symbol_hook (bfd *abfd,
4751 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4752 Elf_Internal_Sym *sym,
4753 const char **namep ATTRIBUTE_UNUSED,
4754 flagword *flagsp ATTRIBUTE_UNUSED,
4755 asection **secp,
4756 bfd_vma *valp)
4757 {
4758 asection *lcomm;
4759
4760 switch (sym->st_shndx)
4761 {
4762 case SHN_X86_64_LCOMMON:
4763 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4764 if (lcomm == NULL)
4765 {
4766 lcomm = bfd_make_section_with_flags (abfd,
4767 "LARGE_COMMON",
4768 (SEC_ALLOC
4769 | SEC_IS_COMMON
4770 | SEC_LINKER_CREATED));
4771 if (lcomm == NULL)
4772 return FALSE;
4773 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4774 }
4775 *secp = lcomm;
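      /* For a common symbol the linker carries the size as the symbol's
	 value (st_value of a common symbol only records its alignment).  */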
4776 *valp = sym->st_size;
4777 return TRUE;
4778 }
4779
4780 return TRUE;
4781 }
4782
4783
4784 /* Given a BFD section, try to locate the corresponding ELF section
4785 index. */
4786
4787 static bfd_boolean
4788 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4789 asection *sec, int *index_return)
4790 {
4791 if (sec == &_bfd_elf_large_com_section)
4792 {
4793 *index_return = SHN_X86_64_LCOMMON;
4794 return TRUE;
4795 }
4796 return FALSE;
4797 }
4798
4799 /* Process a symbol. */
4800
4801 static void
4802 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4803 asymbol *asym)
4804 {
4805 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4806
4807 switch (elfsym->internal_elf_sym.st_shndx)
4808 {
4809 case SHN_X86_64_LCOMMON:
4810 asym->section = &_bfd_elf_large_com_section;
4811 asym->value = elfsym->internal_elf_sym.st_size;
4812 /* Common symbol doesn't set BSF_GLOBAL. */
4813 asym->flags &= ~BSF_GLOBAL;
4814 break;
4815 }
4816 }
4817
4818 static bfd_boolean
4819 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4820 {
4821 return (sym->st_shndx == SHN_COMMON
4822 || sym->st_shndx == SHN_X86_64_LCOMMON);
4823 }
4824
4825 static unsigned int
4826 elf_x86_64_common_section_index (asection *sec)
4827 {
4828 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4829 return SHN_COMMON;
4830 else
4831 return SHN_X86_64_LCOMMON;
4832 }
4833
4834 static asection *
4835 elf_x86_64_common_section (asection *sec)
4836 {
4837 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4838 return bfd_com_section_ptr;
4839 else
4840 return &_bfd_elf_large_com_section;
4841 }
4842
4843 static bfd_boolean
4844 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4845 const Elf_Internal_Sym *sym,
4846 asection **psec,
4847 bfd_boolean newdef,
4848 bfd_boolean olddef,
4849 bfd *oldbfd,
4850 const asection *oldsec)
4851 {
4852 /* A normal common symbol and a large common symbol result in a
4853 normal common symbol. We turn the large common symbol into a
4854 normal one. */
4855 if (!olddef
4856 && h->root.type == bfd_link_hash_common
4857 && !newdef
4858 && bfd_is_com_section (*psec)
4859 && oldsec != *psec)
4860 {
4861 if (sym->st_shndx == SHN_COMMON
4862 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4863 {
4864 h->root.u.c.p->section
4865 = bfd_make_section_old_way (oldbfd, "COMMON");
4866 h->root.u.c.p->section->flags = SEC_ALLOC;
4867 }
4868 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4869 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4870 *psec = bfd_com_section_ptr;
4871 }
4872
4873 return TRUE;
4874 }
4875
4876 static int
4877 elf_x86_64_additional_program_headers (bfd *abfd,
4878 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4879 {
4880 asection *s;
4881 int count = 0;
4882
4883 /* Check to see if we need a large readonly segment. */
4884 s = bfd_get_section_by_name (abfd, ".lrodata");
4885 if (s && (s->flags & SEC_LOAD))
4886 count++;
4887
4888   /* Check to see if we need a large data segment.  Since the .lbss
4889      section is placed right after the .bss section, there should be no
4890      need for a large data segment just because of .lbss.  */
4891 s = bfd_get_section_by_name (abfd, ".ldata");
4892 if (s && (s->flags & SEC_LOAD))
4893 count++;
4894
4895 return count;
4896 }
4897
4898 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4899
4900 static bfd_boolean
4901 elf_x86_64_relocs_compatible (const bfd_target *input,
4902 const bfd_target *output)
4903 {
4904 return ((xvec_get_elf_backend_data (input)->s->elfclass
4905 == xvec_get_elf_backend_data (output)->s->elfclass)
4906 && _bfd_elf_relocs_compatible (input, output));
4907 }
4908
4909 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4910 with GNU properties if found. Otherwise, return NULL. */
4911
4912 static bfd *
4913 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4914 {
4915 struct elf_x86_init_table init_table;
4916
4917 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4918 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4919 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4920 != (int) R_X86_64_GNU_VTINHERIT)
4921 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4922 != (int) R_X86_64_GNU_VTENTRY))
4923 abort ();
4924
4925 /* This is unused for x86-64. */
4926 init_table.plt0_pad_byte = 0x90;
4927
4928 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4929 {
4930 if (info->bndplt)
4931 {
4932 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4933 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4934 }
4935 else
4936 {
4937 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4938 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4939 }
4940
4941 if (ABI_64_P (info->output_bfd))
4942 {
4943 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4944 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4945 }
4946 else
4947 {
4948 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4949 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4950 }
4951 }
4952 else
4953 {
4954 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4955 init_table.non_lazy_plt = NULL;
4956 init_table.lazy_ibt_plt = NULL;
4957 init_table.non_lazy_ibt_plt = NULL;
4958 }
4959
4960 if (ABI_64_P (info->output_bfd))
4961 {
4962 init_table.r_info = elf64_r_info;
4963 init_table.r_sym = elf64_r_sym;
4964 }
4965 else
4966 {
4967 init_table.r_info = elf32_r_info;
4968 init_table.r_sym = elf32_r_sym;
4969 }
4970
4971 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4972 }
4973
4974 static const struct bfd_elf_special_section
4975 elf_x86_64_special_sections[]=
4976 {
4977 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4978 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4979 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4980 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4981 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4982 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4983 { NULL, 0, 0, 0, 0 }
4984 };
4985
4986 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4987 #define TARGET_LITTLE_NAME "elf64-x86-64"
4988 #define ELF_ARCH bfd_arch_i386
4989 #define ELF_TARGET_ID X86_64_ELF_DATA
4990 #define ELF_MACHINE_CODE EM_X86_64
4991 #if DEFAULT_LD_Z_SEPARATE_CODE
4992 # define ELF_MAXPAGESIZE 0x1000
4993 #else
4994 # define ELF_MAXPAGESIZE 0x200000
4995 #endif
4996 #define ELF_MINPAGESIZE 0x1000
4997 #define ELF_COMMONPAGESIZE 0x1000
4998
4999 #define elf_backend_can_gc_sections 1
5000 #define elf_backend_can_refcount 1
5001 #define elf_backend_want_got_plt 1
5002 #define elf_backend_plt_readonly 1
5003 #define elf_backend_want_plt_sym 0
5004 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5005 #define elf_backend_rela_normal 1
5006 #define elf_backend_plt_alignment 4
5007 #define elf_backend_extern_protected_data 1
5008 #define elf_backend_caches_rawsize 1
5009 #define elf_backend_dtrel_excludes_plt 1
5010 #define elf_backend_want_dynrelro 1
5011
5012 #define elf_info_to_howto elf_x86_64_info_to_howto
5013
5014 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5015 #define bfd_elf64_bfd_reloc_name_lookup \
5016 elf_x86_64_reloc_name_lookup
5017
5018 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5019 #define elf_backend_check_relocs elf_x86_64_check_relocs
5020 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5021 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5022 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5023 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5024 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5025 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5026 #ifdef CORE_HEADER
5027 #define elf_backend_write_core_note elf_x86_64_write_core_note
5028 #endif
5029 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5030 #define elf_backend_relocate_section elf_x86_64_relocate_section
5031 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5032 #define elf_backend_object_p elf64_x86_64_elf_object_p
5033 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5034
5035 #define elf_backend_section_from_shdr \
5036 elf_x86_64_section_from_shdr
5037
5038 #define elf_backend_section_from_bfd_section \
5039 elf_x86_64_elf_section_from_bfd_section
5040 #define elf_backend_add_symbol_hook \
5041 elf_x86_64_add_symbol_hook
5042 #define elf_backend_symbol_processing \
5043 elf_x86_64_symbol_processing
5044 #define elf_backend_common_section_index \
5045 elf_x86_64_common_section_index
5046 #define elf_backend_common_section \
5047 elf_x86_64_common_section
5048 #define elf_backend_common_definition \
5049 elf_x86_64_common_definition
5050 #define elf_backend_merge_symbol \
5051 elf_x86_64_merge_symbol
5052 #define elf_backend_special_sections \
5053 elf_x86_64_special_sections
5054 #define elf_backend_additional_program_headers \
5055 elf_x86_64_additional_program_headers
5056 #define elf_backend_setup_gnu_properties \
5057 elf_x86_64_link_setup_gnu_properties
5058 #define elf_backend_hide_symbol \
5059 _bfd_x86_elf_hide_symbol
5060
5061 #undef elf64_bed
5062 #define elf64_bed elf64_x86_64_bed
5063
5064 #include "elf64-target.h"
5065
5066 /* CloudABI support. */
5067
5068 #undef TARGET_LITTLE_SYM
5069 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5070 #undef TARGET_LITTLE_NAME
5071 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5072
5073 #undef ELF_OSABI
5074 #define ELF_OSABI ELFOSABI_CLOUDABI
5075
5076 #undef elf64_bed
5077 #define elf64_bed elf64_x86_64_cloudabi_bed
5078
5079 #include "elf64-target.h"
5080
5081 /* FreeBSD support. */
5082
5083 #undef TARGET_LITTLE_SYM
5084 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5085 #undef TARGET_LITTLE_NAME
5086 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5087
5088 #undef ELF_OSABI
5089 #define ELF_OSABI ELFOSABI_FREEBSD
5090
5091 #undef elf64_bed
5092 #define elf64_bed elf64_x86_64_fbsd_bed
5093
5094 #include "elf64-target.h"
5095
5096 /* Solaris 2 support. */
5097
5098 #undef TARGET_LITTLE_SYM
5099 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5100 #undef TARGET_LITTLE_NAME
5101 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5102
5103 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5104 {
5105 is_solaris /* os */
5106 };
5107
5108 #undef elf_backend_arch_data
5109 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5110
5111 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5112 objects won't be recognized. */
5113 #undef ELF_OSABI
5114
5115 #undef elf64_bed
5116 #define elf64_bed elf64_x86_64_sol2_bed
5117
5118 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5119 boundary. */
5120 #undef elf_backend_static_tls_alignment
5121 #define elf_backend_static_tls_alignment 16
5122
5123 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5124
5125 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5126 File, p.63. */
5127 #undef elf_backend_want_plt_sym
5128 #define elf_backend_want_plt_sym 1
5129
5130 #undef elf_backend_strtab_flags
5131 #define elf_backend_strtab_flags SHF_STRINGS
5132
5133 static bfd_boolean
5134 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5135 bfd *obfd ATTRIBUTE_UNUSED,
5136 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5137 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5138 {
5139 /* PR 19938: FIXME: Need to add code for setting the sh_info
5140 and sh_link fields of Solaris specific section types. */
5141 return FALSE;
5142 }
5143
5144 #undef elf_backend_copy_special_section_fields
5145 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5146
5147 #include "elf64-target.h"
5148
5149 /* Native Client support. */
5150
5151 static bfd_boolean
5152 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5153 {
5154 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5155 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5156 return TRUE;
5157 }
5158
5159 #undef TARGET_LITTLE_SYM
5160 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5161 #undef TARGET_LITTLE_NAME
5162 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5163 #undef elf64_bed
5164 #define elf64_bed elf64_x86_64_nacl_bed
5165
5166 #undef ELF_MAXPAGESIZE
5167 #undef ELF_MINPAGESIZE
5168 #undef ELF_COMMONPAGESIZE
5169 #define ELF_MAXPAGESIZE 0x10000
5170 #define ELF_MINPAGESIZE 0x10000
5171 #define ELF_COMMONPAGESIZE 0x10000
5172
5173 /* Restore defaults. */
5174 #undef ELF_OSABI
5175 #undef elf_backend_static_tls_alignment
5176 #undef elf_backend_want_plt_sym
5177 #define elf_backend_want_plt_sym 0
5178 #undef elf_backend_strtab_flags
5179 #undef elf_backend_copy_special_section_fields
5180
5181 /* NaCl uses substantially different PLT entries for the same effects. */
5182
5183 #undef elf_backend_plt_alignment
5184 #define elf_backend_plt_alignment 5
5185 #define NACL_PLT_ENTRY_SIZE 64
5186 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5187
5188 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5189 {
5190 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5191 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5192 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5193 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5194 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5195
5196 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5197 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5198
5199 /* 32 bytes of nop to pad out to the standard size. */
5200 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5201 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5202 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5203 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5204 0x66, /* excess data16 prefix */
5205 0x90 /* nop */
5206 };
5207
5208 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5209 {
5210 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5211 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5212 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5213 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5214
5215 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5216 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5217 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5218
5219 /* Lazy GOT entries point here (32-byte aligned). */
5220 0x68, /* pushq immediate */
5221 0, 0, 0, 0, /* replaced with index into relocation table. */
5222 0xe9, /* jmp relative */
5223 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5224
5225 /* 22 bytes of nop to pad out to the standard size. */
5226 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5227 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5228 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5229 };
5230
5231 /* .eh_frame covering the .plt section. */
5232
5233 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5234 {
5235 #if (PLT_CIE_LENGTH != 20 \
5236 || PLT_FDE_LENGTH != 36 \
5237 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5238 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5239 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5240 #endif
5241 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5242 0, 0, 0, 0, /* CIE ID */
5243 1, /* CIE version */
5244 'z', 'R', 0, /* Augmentation string */
5245 1, /* Code alignment factor */
5246 0x78, /* Data alignment factor */
5247 16, /* Return address column */
5248 1, /* Augmentation size */
5249 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5250 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5251 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5252 DW_CFA_nop, DW_CFA_nop,
5253
5254 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5255 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5256 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5257 0, 0, 0, 0, /* .plt size goes here */
5258 0, /* Augmentation size */
5259 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5260 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5261 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5262 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5263 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5264 13, /* Block length */
5265 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5266 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5267 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5268 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5269 DW_CFA_nop, DW_CFA_nop
5270 };
5271
5272 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5273 {
5274 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5275 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5276 elf_x86_64_nacl_plt_entry, /* plt_entry */
5277 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5278 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5279 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5280 2, /* plt_tlsdesc_got1_offset */
5281 9, /* plt_tlsdesc_got2_offset */
5282 6, /* plt_tlsdesc_got1_insn_end */
5283 13, /* plt_tlsdesc_got2_insn_end */
5284 2, /* plt0_got1_offset */
5285 9, /* plt0_got2_offset */
5286 13, /* plt0_got2_insn_end */
5287 3, /* plt_got_offset */
5288 33, /* plt_reloc_offset */
5289 38, /* plt_plt_offset */
5290 7, /* plt_got_insn_size */
5291 42, /* plt_plt_insn_end */
5292 32, /* plt_lazy_offset */
5293 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5294 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5295 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5296 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5297 };
5298
5299 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5300 {
5301 is_nacl /* os */
5302 };
5303
5304 #undef elf_backend_arch_data
5305 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5306
5307 #undef elf_backend_object_p
5308 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5309 #undef elf_backend_modify_segment_map
5310 #define elf_backend_modify_segment_map nacl_modify_segment_map
5311 #undef elf_backend_modify_program_headers
5312 #define elf_backend_modify_program_headers nacl_modify_program_headers
5313 #undef elf_backend_final_write_processing
5314 #define elf_backend_final_write_processing nacl_final_write_processing
5315
5316 #include "elf64-target.h"
5317
5318 /* Native Client x32 support. */
5319
5320 static bfd_boolean
5321 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5322 {
5323 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5324 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5325 return TRUE;
5326 }
5327
5328 #undef TARGET_LITTLE_SYM
5329 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5330 #undef TARGET_LITTLE_NAME
5331 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5332 #undef elf32_bed
5333 #define elf32_bed elf32_x86_64_nacl_bed
5334
5335 #define bfd_elf32_bfd_reloc_type_lookup \
5336 elf_x86_64_reloc_type_lookup
5337 #define bfd_elf32_bfd_reloc_name_lookup \
5338 elf_x86_64_reloc_name_lookup
5339 #define bfd_elf32_get_synthetic_symtab \
5340 elf_x86_64_get_synthetic_symtab
5341
5342 #undef elf_backend_object_p
5343 #define elf_backend_object_p \
5344 elf32_x86_64_nacl_elf_object_p
5345
5346 #undef elf_backend_bfd_from_remote_memory
5347 #define elf_backend_bfd_from_remote_memory \
5348 _bfd_elf32_bfd_from_remote_memory
5349
5350 #undef elf_backend_size_info
5351 #define elf_backend_size_info \
5352 _bfd_elf32_size_info
5353
5354 #undef elf32_bed
5355 #define elf32_bed elf32_x86_64_bed
5356
5357 #include "elf32-target.h"
5358
5359 /* Restore defaults. */
5360 #undef elf_backend_object_p
5361 #define elf_backend_object_p elf64_x86_64_elf_object_p
5362 #undef elf_backend_bfd_from_remote_memory
5363 #undef elf_backend_size_info
5364 #undef elf_backend_modify_segment_map
5365 #undef elf_backend_modify_program_headers
5366 #undef elf_backend_final_write_processing
5367
5368 /* Intel L1OM support. */
5369
5370 static bfd_boolean
5371 elf64_l1om_elf_object_p (bfd *abfd)
5372 {
5373 /* Set the right machine number for an L1OM elf64 file. */
5374 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5375 return TRUE;
5376 }
5377
5378 #undef TARGET_LITTLE_SYM
5379 #define TARGET_LITTLE_SYM l1om_elf64_vec
5380 #undef TARGET_LITTLE_NAME
5381 #define TARGET_LITTLE_NAME "elf64-l1om"
5382 #undef ELF_ARCH
5383 #define ELF_ARCH bfd_arch_l1om
5384
5385 #undef ELF_MACHINE_CODE
5386 #define ELF_MACHINE_CODE EM_L1OM
5387
5388 #undef ELF_OSABI
5389
5390 #undef elf64_bed
5391 #define elf64_bed elf64_l1om_bed
5392
5393 #undef elf_backend_object_p
5394 #define elf_backend_object_p elf64_l1om_elf_object_p
5395
5396 /* Restore defaults. */
5397 #undef ELF_MAXPAGESIZE
5398 #undef ELF_MINPAGESIZE
5399 #undef ELF_COMMONPAGESIZE
5400 #if DEFAULT_LD_Z_SEPARATE_CODE
5401 # define ELF_MAXPAGESIZE 0x1000
5402 #else
5403 # define ELF_MAXPAGESIZE 0x200000
5404 #endif
5405 #define ELF_MINPAGESIZE 0x1000
5406 #define ELF_COMMONPAGESIZE 0x1000
5407 #undef elf_backend_plt_alignment
5408 #define elf_backend_plt_alignment 4
5409 #undef elf_backend_arch_data
5410 #define elf_backend_arch_data &elf_x86_64_arch_bed
5411
5412 #include "elf64-target.h"
5413
5414 /* FreeBSD L1OM support. */
5415
5416 #undef TARGET_LITTLE_SYM
5417 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5418 #undef TARGET_LITTLE_NAME
5419 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5420
5421 #undef ELF_OSABI
5422 #define ELF_OSABI ELFOSABI_FREEBSD
5423
5424 #undef elf64_bed
5425 #define elf64_bed elf64_l1om_fbsd_bed
5426
5427 #include "elf64-target.h"
5428
5429 /* Intel K1OM support. */
5430
5431 static bfd_boolean
5432 elf64_k1om_elf_object_p (bfd *abfd)
5433 {
5434 /* Set the right machine number for an K1OM elf64 file. */
5435 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5436 return TRUE;
5437 }
5438
5439 #undef TARGET_LITTLE_SYM
5440 #define TARGET_LITTLE_SYM k1om_elf64_vec
5441 #undef TARGET_LITTLE_NAME
5442 #define TARGET_LITTLE_NAME "elf64-k1om"
5443 #undef ELF_ARCH
5444 #define ELF_ARCH bfd_arch_k1om
5445
5446 #undef ELF_MACHINE_CODE
5447 #define ELF_MACHINE_CODE EM_K1OM
5448
5449 #undef ELF_OSABI
5450
5451 #undef elf64_bed
5452 #define elf64_bed elf64_k1om_bed
5453
5454 #undef elf_backend_object_p
5455 #define elf_backend_object_p elf64_k1om_elf_object_p
5456
5457 #undef elf_backend_static_tls_alignment
5458
5459 #undef elf_backend_want_plt_sym
5460 #define elf_backend_want_plt_sym 0
5461
5462 #include "elf64-target.h"
5463
5464 /* FreeBSD K1OM support. */
5465
5466 #undef TARGET_LITTLE_SYM
5467 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5468 #undef TARGET_LITTLE_NAME
5469 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5470
5471 #undef ELF_OSABI
5472 #define ELF_OSABI ELFOSABI_FREEBSD
5473
5474 #undef elf64_bed
5475 #define elf64_bed elf64_k1om_fbsd_bed
5476
5477 #include "elf64-target.h"
5478
5479 /* 32bit x86-64 support. */
5480
5481 #undef TARGET_LITTLE_SYM
5482 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5483 #undef TARGET_LITTLE_NAME
5484 #define TARGET_LITTLE_NAME "elf32-x86-64"
5485 #undef elf32_bed
5486
5487 #undef ELF_ARCH
5488 #define ELF_ARCH bfd_arch_i386
5489
5490 #undef ELF_MACHINE_CODE
5491 #define ELF_MACHINE_CODE EM_X86_64
5492
5493 #undef ELF_OSABI
5494
5495 #undef elf_backend_object_p
5496 #define elf_backend_object_p \
5497 elf32_x86_64_elf_object_p
5498
5499 #undef elf_backend_bfd_from_remote_memory
5500 #define elf_backend_bfd_from_remote_memory \
5501 _bfd_elf32_bfd_from_remote_memory
5502
5503 #undef elf_backend_size_info
5504 #define elf_backend_size_info \
5505 _bfd_elf32_size_info
5506
5507 #include "elf32-target.h"