x86: Add GENERATE_DYNAMIC_RELOCATION_P
bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
 39    the same manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
 40    the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
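/* Editor's illustrative sketch, not part of the original source: a
   typical use of the lookup above, mapping a generic BFD reloc code to
   its x86-64 howto entry.  Kept under "#if 0" so it has no effect on
   the build.  */
#if 0
static void
example_reloc_lookup (bfd *abfd)
{
  reloc_howto_type *howto
    = elf_x86_64_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);

  /* BFD_RELOC_32_PCREL maps to R_X86_64_PC32 in x86_64_reloc_map, so
     the returned howto is the R_X86_64_PC32 entry with pc_relative
     set.  */
  BFD_ASSERT (howto != NULL
	      && howto->type == R_X86_64_PC32
	      && howto->pc_relative);
}
#endif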
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
 366     case 296:		/* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
 379     case 336:		/* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
 426   /* Note that, for some reason, a spurious space is tacked
 427      onto the end of the args in some implementations (at least
 428      one, anyway), so strip it off if it exists.  */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
546 works. */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
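/* Editor's illustrative sketch, not part of the original source: how
   the three 32-bit placeholders in elf_x86_64_lazy_plt_entry above are
   typically filled in for a PLT entry.  The real logic lives in the
   PLT finishing code (outside this section); the offsets 2, 7 and 12
   correspond to the plt_got_offset, plt_reloc_offset and plt_plt_offset
   fields of elf_x86_64_lazy_plt below.  Kept under "#if 0" so it has no
   effect on the build.  */
#if 0
static void
example_fill_lazy_plt_entry (bfd *abfd, bfd_byte *entry,
			     bfd_vma entry_addr, bfd_vma got_slot_addr,
			     bfd_vma plt0_addr, unsigned int reloc_index)
{
  /* Offset 2: PC-relative displacement to the symbol's GOT slot,
     measured from the end of the 6-byte "jmpq *disp(%rip)".  */
  bfd_put_32 (abfd, got_slot_addr - (entry_addr + 6), entry + 2);
  /* Offset 7: index of this symbol's entry in .rela.plt.  */
  bfd_put_32 (abfd, reloc_index, entry + 7);
  /* Offset 12: displacement back to PLT0, measured from the end of
     this 16-byte entry.  */
  bfd_put_32 (abfd, plt0_addr - (entry_addr + 16), entry + 12);
}
#endif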
566
 567 /* The first entry in a lazy procedure linkage table with BND prefix
 568    looks like this.  */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
 577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
 587 /* The first entry in the IBT-enabled lazy procedure linkage table is
 588    the same as the lazy PLT with BND prefix so that bound registers are
 589    preserved when control is passed to the dynamic linker.  Subsequent
 590    entries for an IBT-enabled lazy procedure linkage table look like
 591    this.  */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
 613 /* Entries in the non-lazy procedure linkage table look like this.  */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
 622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
 632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
 644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* Architecture-specific backend data for x86-64. */
824
825 struct elf_x86_64_backend_data
826 {
827 /* Target system. */
828 enum
829 {
830 is_normal,
831 is_nacl
832 } os;
833 };
834
835 #define get_elf_x86_64_arch_data(bed) \
836 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
837
838 #define get_elf_x86_64_backend_data(abfd) \
839 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
840
841 /* These are the standard parameters. */
842 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
843 {
844 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
845 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
846 elf_x86_64_lazy_plt_entry, /* plt_entry */
847 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
848 2, /* plt0_got1_offset */
849 8, /* plt0_got2_offset */
850 12, /* plt0_got2_insn_end */
851 2, /* plt_got_offset */
852 7, /* plt_reloc_offset */
853 12, /* plt_plt_offset */
854 6, /* plt_got_insn_size */
855 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
856 6, /* plt_lazy_offset */
857 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
858 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
859 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
860 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
861 };
862
863 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
864 {
865 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
866 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
867 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
868 2, /* plt_got_offset */
869 6, /* plt_got_insn_size */
870 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
871 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
872 };
873
874 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
875 {
876 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
877 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
878 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
879 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
880 2, /* plt0_got1_offset */
881 1+8, /* plt0_got2_offset */
882 1+12, /* plt0_got2_insn_end */
883 1+2, /* plt_got_offset */
884 1, /* plt_reloc_offset */
885 7, /* plt_plt_offset */
886 1+6, /* plt_got_insn_size */
887 11, /* plt_plt_insn_end */
888 0, /* plt_lazy_offset */
889 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
890 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
891 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
892 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
893 };
894
895 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
896 {
897 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
898 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
899 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
900 1+2, /* plt_got_offset */
901 1+6, /* plt_got_insn_size */
902 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
903 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
904 };
905
906 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
907 {
908 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
909 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
910 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
911 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
912 2, /* plt0_got1_offset */
913 1+8, /* plt0_got2_offset */
914 1+12, /* plt0_got2_insn_end */
915 4+1+2, /* plt_got_offset */
916 4+1, /* plt_reloc_offset */
917 4+1+6, /* plt_plt_offset */
918 4+1+6, /* plt_got_insn_size */
919 4+1+5+5, /* plt_plt_insn_end */
920 0, /* plt_lazy_offset */
921 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
922 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
923 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
924 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
925 };
926
927 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
928 {
929 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
930 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
931 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
932 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
933 2, /* plt0_got1_offset */
934 8, /* plt0_got2_offset */
935 12, /* plt0_got2_insn_end */
936 4+2, /* plt_got_offset */
937 4+1, /* plt_reloc_offset */
938 4+6, /* plt_plt_offset */
939 4+6, /* plt_got_insn_size */
940 4+5+5, /* plt_plt_insn_end */
941 0, /* plt_lazy_offset */
942 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
943 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
944 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
945 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
946 };
947
948 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
949 {
950 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
951 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
952 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
953 4+1+2, /* plt_got_offset */
954 4+1+6, /* plt_got_insn_size */
955 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
956 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
957 };
958
959 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
960 {
961 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
962 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
963 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
964 4+2, /* plt_got_offset */
965 4+6, /* plt_got_insn_size */
966 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
967 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
968 };
969
970 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
971 {
972 is_normal /* os */
973 };
974
975 #define elf_backend_arch_data &elf_x86_64_arch_bed
976
977 static bfd_boolean
978 elf64_x86_64_elf_object_p (bfd *abfd)
979 {
980 /* Set the right machine number for an x86-64 elf64 file. */
981 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
982 return TRUE;
983 }
984
985 static bfd_boolean
986 elf32_x86_64_elf_object_p (bfd *abfd)
987 {
988 /* Set the right machine number for an x86-64 elf32 file. */
989 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
990 return TRUE;
991 }
992
 993 /* Return TRUE if the TLS access code sequence supports transition
994 from R_TYPE. */
995
996 static bfd_boolean
997 elf_x86_64_check_tls_transition (bfd *abfd,
998 struct bfd_link_info *info,
999 asection *sec,
1000 bfd_byte *contents,
1001 Elf_Internal_Shdr *symtab_hdr,
1002 struct elf_link_hash_entry **sym_hashes,
1003 unsigned int r_type,
1004 const Elf_Internal_Rela *rel,
1005 const Elf_Internal_Rela *relend)
1006 {
1007 unsigned int val;
1008 unsigned long r_symndx;
1009 bfd_boolean largepic = FALSE;
1010 struct elf_link_hash_entry *h;
1011 bfd_vma offset;
1012 struct elf_x86_link_hash_table *htab;
1013 bfd_byte *call;
1014 bfd_boolean indirect_call;
1015
1016 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1017 offset = rel->r_offset;
1018 switch (r_type)
1019 {
1020 case R_X86_64_TLSGD:
1021 case R_X86_64_TLSLD:
1022 if ((rel + 1) >= relend)
1023 return FALSE;
1024
1025 if (r_type == R_X86_64_TLSGD)
1026 {
1027 	  /* Check transition from GD access model.  For 64-bit, only
1028 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1029 .word 0x6666; rex64; call __tls_get_addr@PLT
1030 or
1031 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1032 .byte 0x66; rex64
1033 call *__tls_get_addr@GOTPCREL(%rip)
1034 which may be converted to
1035 addr32 call __tls_get_addr
1036 	     can transition to a different access model.  For 32-bit, only
1037 leaq foo@tlsgd(%rip), %rdi
1038 .word 0x6666; rex64; call __tls_get_addr@PLT
1039 or
1040 leaq foo@tlsgd(%rip), %rdi
1041 .byte 0x66; rex64
1042 call *__tls_get_addr@GOTPCREL(%rip)
1043 which may be converted to
1044 addr32 call __tls_get_addr
1045 	     can transition to a different access model.  For largepic,
1046 we also support:
1047 leaq foo@tlsgd(%rip), %rdi
1048 movabsq $__tls_get_addr@pltoff, %rax
1049 addq $r15, %rax
1050 call *%rax
1051 or
1052 leaq foo@tlsgd(%rip), %rdi
1053 movabsq $__tls_get_addr@pltoff, %rax
1054 addq $rbx, %rax
1055 call *%rax */
1056
1057 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1058
1059 if ((offset + 12) > sec->size)
1060 return FALSE;
1061
1062 call = contents + offset + 4;
1063 if (call[0] != 0x66
1064 || !((call[1] == 0x48
1065 && call[2] == 0xff
1066 && call[3] == 0x15)
1067 || (call[1] == 0x48
1068 && call[2] == 0x67
1069 && call[3] == 0xe8)
1070 || (call[1] == 0x66
1071 && call[2] == 0x48
1072 && call[3] == 0xe8)))
1073 {
1074 if (!ABI_64_P (abfd)
1075 || (offset + 19) > sec->size
1076 || offset < 3
1077 || memcmp (call - 7, leaq + 1, 3) != 0
1078 || memcmp (call, "\x48\xb8", 2) != 0
1079 || call[11] != 0x01
1080 || call[13] != 0xff
1081 || call[14] != 0xd0
1082 || !((call[10] == 0x48 && call[12] == 0xd8)
1083 || (call[10] == 0x4c && call[12] == 0xf8)))
1084 return FALSE;
1085 largepic = TRUE;
1086 }
1087 else if (ABI_64_P (abfd))
1088 {
1089 if (offset < 4
1090 || memcmp (contents + offset - 4, leaq, 4) != 0)
1091 return FALSE;
1092 }
1093 else
1094 {
1095 if (offset < 3
1096 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1097 return FALSE;
1098 }
1099 indirect_call = call[2] == 0xff;
1100 }
1101 else
1102 {
1103 /* Check transition from LD access model. Only
1104 leaq foo@tlsld(%rip), %rdi;
1105 call __tls_get_addr@PLT
1106 or
1107 leaq foo@tlsld(%rip), %rdi;
1108 call *__tls_get_addr@GOTPCREL(%rip)
1109 which may be converted to
1110 addr32 call __tls_get_addr
1111 	     can transition to a different access model.  For largepic
1112 we also support:
1113 leaq foo@tlsld(%rip), %rdi
1114 movabsq $__tls_get_addr@pltoff, %rax
1115 addq $r15, %rax
1116 call *%rax
1117 or
1118 leaq foo@tlsld(%rip), %rdi
1119 movabsq $__tls_get_addr@pltoff, %rax
1120 addq $rbx, %rax
1121 call *%rax */
1122
1123 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1124
1125 if (offset < 3 || (offset + 9) > sec->size)
1126 return FALSE;
1127
1128 if (memcmp (contents + offset - 3, lea, 3) != 0)
1129 return FALSE;
1130
1131 call = contents + offset + 4;
1132 if (!(call[0] == 0xe8
1133 || (call[0] == 0xff && call[1] == 0x15)
1134 || (call[0] == 0x67 && call[1] == 0xe8)))
1135 {
1136 if (!ABI_64_P (abfd)
1137 || (offset + 19) > sec->size
1138 || memcmp (call, "\x48\xb8", 2) != 0
1139 || call[11] != 0x01
1140 || call[13] != 0xff
1141 || call[14] != 0xd0
1142 || !((call[10] == 0x48 && call[12] == 0xd8)
1143 || (call[10] == 0x4c && call[12] == 0xf8)))
1144 return FALSE;
1145 largepic = TRUE;
1146 }
1147 indirect_call = call[0] == 0xff;
1148 }
1149
1150 r_symndx = htab->r_sym (rel[1].r_info);
1151 if (r_symndx < symtab_hdr->sh_info)
1152 return FALSE;
1153
1154 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1155 if (h == NULL
1156 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1157 return FALSE;
1158 else
1159 {
1160 r_type = (ELF32_R_TYPE (rel[1].r_info)
1161 & ~R_X86_64_converted_reloc_bit);
1162 if (largepic)
1163 return r_type == R_X86_64_PLTOFF64;
1164 else if (indirect_call)
1165 return r_type == R_X86_64_GOTPCRELX;
1166 else
1167 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1168 }
1169
1170 case R_X86_64_GOTTPOFF:
1171 /* Check transition from IE access model:
1172 mov foo@gottpoff(%rip), %reg
1173 add foo@gottpoff(%rip), %reg
1174 */
1175
1176 /* Check REX prefix first. */
1177 if (offset >= 3 && (offset + 4) <= sec->size)
1178 {
1179 val = bfd_get_8 (abfd, contents + offset - 3);
1180 if (val != 0x48 && val != 0x4c)
1181 {
1182 /* X32 may have 0x44 REX prefix or no REX prefix. */
1183 if (ABI_64_P (abfd))
1184 return FALSE;
1185 }
1186 }
1187 else
1188 {
1189 /* X32 may not have any REX prefix. */
1190 if (ABI_64_P (abfd))
1191 return FALSE;
1192 if (offset < 2 || (offset + 3) > sec->size)
1193 return FALSE;
1194 }
1195
1196 val = bfd_get_8 (abfd, contents + offset - 2);
1197 if (val != 0x8b && val != 0x03)
1198 return FALSE;
1199
1200 val = bfd_get_8 (abfd, contents + offset - 1);
1201 return (val & 0xc7) == 5;
1202
1203 case R_X86_64_GOTPC32_TLSDESC:
1204 /* Check transition from GDesc access model:
1205 leaq x@tlsdesc(%rip), %rax
1206
1207 Make sure it's a leaq adding rip to a 32-bit offset
1208 into any register, although it's probably almost always
1209 going to be rax. */
1210
1211 if (offset < 3 || (offset + 4) > sec->size)
1212 return FALSE;
1213
1214 val = bfd_get_8 (abfd, contents + offset - 3);
1215 if ((val & 0xfb) != 0x48)
1216 return FALSE;
1217
1218 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 0x05;
1223
1224 case R_X86_64_TLSDESC_CALL:
1225 /* Check transition from GDesc access model:
1226 call *x@tlsdesc(%rax)
1227 */
1228 if (offset + 2 <= sec->size)
1229 {
1230 /* Make sure that it's a call *x@tlsdesc(%rax). */
1231 call = contents + offset;
1232 return call[0] == 0xff && call[1] == 0x10;
1233 }
1234
1235 return FALSE;
1236
1237 default:
1238 abort ();
1239 }
1240 }
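/* Editor's illustrative sketch, not part of the original source: when
   a 64-bit GD sequence accepted by the check above is relaxed to the
   LE model, relocate_section (outside this section) rewrites the
   16-byte GD code to the sequence below, with the TPOFF value patched
   into the last four bytes.  Kept under "#if 0" so it has no effect on
   the build.  */
#if 0
static const bfd_byte example_x86_64_tlsgd_to_le[16] =
{
  0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0,	/* movq %fs:0, %rax */
  0x48, 0x8d, 0x80, 0, 0, 0, 0			/* leaq foo@tpoff(%rax), %rax */
};
#endif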
1241
1242 /* Return TRUE if the TLS access transition is OK or no transition
1243 will be performed. Update R_TYPE if there is a transition. */
1244
1245 static bfd_boolean
1246 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1247 asection *sec, bfd_byte *contents,
1248 Elf_Internal_Shdr *symtab_hdr,
1249 struct elf_link_hash_entry **sym_hashes,
1250 unsigned int *r_type, int tls_type,
1251 const Elf_Internal_Rela *rel,
1252 const Elf_Internal_Rela *relend,
1253 struct elf_link_hash_entry *h,
1254 unsigned long r_symndx,
1255 bfd_boolean from_relocate_section)
1256 {
1257 unsigned int from_type = *r_type;
1258 unsigned int to_type = from_type;
1259 bfd_boolean check = TRUE;
1260
1261 /* Skip TLS transition for functions. */
1262 if (h != NULL
1263 && (h->type == STT_FUNC
1264 || h->type == STT_GNU_IFUNC))
1265 return TRUE;
1266
1267 switch (from_type)
1268 {
1269 case R_X86_64_TLSGD:
1270 case R_X86_64_GOTPC32_TLSDESC:
1271 case R_X86_64_TLSDESC_CALL:
1272 case R_X86_64_GOTTPOFF:
1273 if (bfd_link_executable (info))
1274 {
1275 if (h == NULL)
1276 to_type = R_X86_64_TPOFF32;
1277 else
1278 to_type = R_X86_64_GOTTPOFF;
1279 }
1280
1281 /* When we are called from elf_x86_64_relocate_section, there may
1282 be additional transitions based on TLS_TYPE. */
1283 if (from_relocate_section)
1284 {
1285 unsigned int new_to_type = to_type;
1286
1287 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1288 new_to_type = R_X86_64_TPOFF32;
1289
1290 if (to_type == R_X86_64_TLSGD
1291 || to_type == R_X86_64_GOTPC32_TLSDESC
1292 || to_type == R_X86_64_TLSDESC_CALL)
1293 {
1294 if (tls_type == GOT_TLS_IE)
1295 new_to_type = R_X86_64_GOTTPOFF;
1296 }
1297
1298 /* We checked the transition before when we were called from
1299 elf_x86_64_check_relocs. We only want to check the new
1300 transition which hasn't been checked before. */
1301 check = new_to_type != to_type && from_type == to_type;
1302 to_type = new_to_type;
1303 }
1304
1305 break;
1306
1307 case R_X86_64_TLSLD:
1308 if (bfd_link_executable (info))
1309 to_type = R_X86_64_TPOFF32;
1310 break;
1311
1312 default:
1313 return TRUE;
1314 }
1315
1316 /* Return TRUE if there is no transition. */
1317 if (from_type == to_type)
1318 return TRUE;
1319
1320 /* Check if the transition can be performed. */
1321 if (check
1322 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1323 symtab_hdr, sym_hashes,
1324 from_type, rel, relend))
1325 {
1326 reloc_howto_type *from, *to;
1327 const char *name;
1328
1329 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1330 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1331
1332 if (h)
1333 name = h->root.root.string;
1334 else
1335 {
1336 struct elf_x86_link_hash_table *htab;
1337
1338 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1339 if (htab == NULL)
1340 name = "*unknown*";
1341 else
1342 {
1343 Elf_Internal_Sym *isym;
1344
1345 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1346 abfd, r_symndx);
1347 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1348 }
1349 }
1350
1351 _bfd_error_handler
1352 /* xgettext:c-format */
1353 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1354 "in section `%A' failed"),
1355 abfd, from->name, to->name, name, rel->r_offset, sec);
1356 bfd_set_error (bfd_error_bad_value);
1357 return FALSE;
1358 }
1359
1360 *r_type = to_type;
1361 return TRUE;
1362 }
1363
1364 /* Rename some of the generic section flags to better document how they
1365 are used here. */
1366 #define check_relocs_failed sec_flg0
1367
1368 static bfd_boolean
1369 elf_x86_64_need_pic (struct bfd_link_info *info,
1370 bfd *input_bfd, asection *sec,
1371 struct elf_link_hash_entry *h,
1372 Elf_Internal_Shdr *symtab_hdr,
1373 Elf_Internal_Sym *isym,
1374 reloc_howto_type *howto)
1375 {
1376 const char *v = "";
1377 const char *und = "";
1378 const char *pic = "";
1379 const char *object;
1380
1381 const char *name;
1382 if (h)
1383 {
1384 name = h->root.root.string;
1385 switch (ELF_ST_VISIBILITY (h->other))
1386 {
1387 case STV_HIDDEN:
1388 v = _("hidden symbol ");
1389 break;
1390 case STV_INTERNAL:
1391 v = _("internal symbol ");
1392 break;
1393 case STV_PROTECTED:
1394 v = _("protected symbol ");
1395 break;
1396 default:
1397 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1398 v = _("protected symbol ");
1399 else
1400 v = _("symbol ");
1401 pic = _("; recompile with -fPIC");
1402 break;
1403 }
1404
1405 if (!h->def_regular && !h->def_dynamic)
1406 und = _("undefined ");
1407 }
1408 else
1409 {
1410 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1411 pic = _("; recompile with -fPIC");
1412 }
1413
1414 if (bfd_link_dll (info))
1415 object = _("a shared object");
1416 else if (bfd_link_pie (info))
1417 object = _("a PIE object");
1418 else
1419 object = _("a PDE object");
1420
1421 /* xgettext:c-format */
1422 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1423 "not be used when making %s%s"),
1424 input_bfd, howto->name, und, v, name,
1425 object, pic);
1426 bfd_set_error (bfd_error_bad_value);
1427 sec->check_relocs_failed = 1;
1428 return FALSE;
1429 }
1430
1431 /* With the local symbol, foo, we convert
1432 mov foo@GOTPCREL(%rip), %reg
1433 to
1434 lea foo(%rip), %reg
1435 and convert
1436 call/jmp *foo@GOTPCREL(%rip)
1437 to
1438    "nop call foo" or "jmp foo nop"
1439 When PIC is false, convert
1440 test %reg, foo@GOTPCREL(%rip)
1441 to
1442 test $foo, %reg
1443 and convert
1444 binop foo@GOTPCREL(%rip), %reg
1445 to
1446 binop $foo, %reg
1447 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1448 instructions. */
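/* Editor's illustrative sketch, not part of the original source: the
   mov -> lea rewrite described above only swaps the opcode byte; the
   ModRM byte and the 32-bit displacement patched by the relocation
   stay where they are.  The real conversion logic follows in
   elf_x86_64_convert_load_reloc.  Kept under "#if 0" so it has no
   effect on the build.  */
#if 0
static void
example_convert_mov_to_lea (bfd *abfd, bfd_byte *contents, bfd_vma roff)
{
  /* ROFF is the relocation offset; the opcode byte is two bytes before
     the 32-bit displacement covered by the relocation.  */
  if (bfd_get_8 (abfd, contents + roff - 2) == 0x8b)	/* mov m64, %reg */
    bfd_put_8 (abfd, 0x8d, contents + roff - 2);	/* lea m, %reg */
}
#endif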
1449
1450 static bfd_boolean
1451 elf_x86_64_convert_load_reloc (bfd *abfd,
1452 bfd_byte *contents,
1453 unsigned int *r_type_p,
1454 Elf_Internal_Rela *irel,
1455 struct elf_link_hash_entry *h,
1456 bfd_boolean *converted,
1457 struct bfd_link_info *link_info)
1458 {
1459 struct elf_x86_link_hash_table *htab;
1460 bfd_boolean is_pic;
1461 bfd_boolean no_overflow;
1462 bfd_boolean relocx;
1463 bfd_boolean to_reloc_pc32;
1464 asection *tsec;
1465 bfd_signed_vma raddend;
1466 unsigned int opcode;
1467 unsigned int modrm;
1468 unsigned int r_type = *r_type_p;
1469 unsigned int r_symndx;
1470 bfd_vma roff = irel->r_offset;
1471
1472 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1473 return TRUE;
1474
1475 raddend = irel->r_addend;
1476 /* Addend for 32-bit PC-relative relocation must be -4. */
1477 if (raddend != -4)
1478 return TRUE;
1479
1480 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1481 is_pic = bfd_link_pic (link_info);
1482
1483 relocx = (r_type == R_X86_64_GOTPCRELX
1484 || r_type == R_X86_64_REX_GOTPCRELX);
1485
1486 /* TRUE if --no-relax is used. */
1487 no_overflow = link_info->disable_target_specific_optimizations > 1;
1488
1489 r_symndx = htab->r_sym (irel->r_info);
1490
1491 opcode = bfd_get_8 (abfd, contents + roff - 2);
1492
1493   /* mov can be converted to lea for any GOTPCREL reloc since that
       conversion has been done for a while.  */
1494 if (opcode != 0x8b)
1495 {
1496 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1497 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1498 test, xor instructions. */
1499 if (!relocx)
1500 return TRUE;
1501 }
1502
1503   /* We convert only to R_X86_64_PC32 when any of the following holds:
1504      1. Branch.
1505      2. R_X86_64_GOTPCREL, since we can't modify the REX byte.
1506 3. no_overflow is true.
1507 4. PIC.
1508 */
1509 to_reloc_pc32 = (opcode == 0xff
1510 || !relocx
1511 || no_overflow
1512 || is_pic);
1513
1514 /* Get the symbol referred to by the reloc. */
1515 if (h == NULL)
1516 {
1517 Elf_Internal_Sym *isym
1518 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1519
1520 /* Skip relocation against undefined symbols. */
1521 if (isym->st_shndx == SHN_UNDEF)
1522 return TRUE;
1523
1524 if (isym->st_shndx == SHN_ABS)
1525 tsec = bfd_abs_section_ptr;
1526 else if (isym->st_shndx == SHN_COMMON)
1527 tsec = bfd_com_section_ptr;
1528 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1529 tsec = &_bfd_elf_large_com_section;
1530 else
1531 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1532 }
1533 else
1534 {
1535       /* An undefined weak symbol is only bound locally in an executable
1536 	 and its reference is resolved as 0 without relocation
1537 	 overflow.  We can only perform this optimization for
1538 	 GOTPCRELX relocations since we need to modify the REX byte.
1539 	 It is OK to convert mov with R_X86_64_GOTPCREL to
1540 	 R_X86_64_PC32.  */
1541 bfd_boolean local_ref;
1542 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1543
1544 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1545 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1546 if ((relocx || opcode == 0x8b)
1547 && (h->root.type == bfd_link_hash_undefweak
1548 && !eh->linker_def
1549 && local_ref))
1550 {
1551 if (opcode == 0xff)
1552 {
1553 /* Skip for branch instructions since R_X86_64_PC32
1554 may overflow. */
1555 if (no_overflow)
1556 return TRUE;
1557 }
1558 else if (relocx)
1559 {
1560 /* For non-branch instructions, we can convert to
1561 R_X86_64_32/R_X86_64_32S since we know if there
1562 is a REX byte. */
1563 to_reloc_pc32 = FALSE;
1564 }
1565
1566 /* Since we don't know the current PC when PIC is true,
1567 we can't convert to R_X86_64_PC32. */
1568 if (to_reloc_pc32 && is_pic)
1569 return TRUE;
1570
1571 goto convert;
1572 }
1573       /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1574 ld.so may use its link-time address. */
1575 else if (h->start_stop
1576 || eh->linker_def
1577 || ((h->def_regular
1578 || h->root.type == bfd_link_hash_defined
1579 || h->root.type == bfd_link_hash_defweak)
1580 && h != htab->elf.hdynamic
1581 && local_ref))
1582 {
1583 /* bfd_link_hash_new or bfd_link_hash_undefined is
1584 set by an assignment in a linker script in
1585 bfd_elf_record_link_assignment. start_stop is set
1586 on __start_SECNAME/__stop_SECNAME which mark section
1587 SECNAME. */
1588 if (h->start_stop
1589 || eh->linker_def
1590 || (h->def_regular
1591 && (h->root.type == bfd_link_hash_new
1592 || h->root.type == bfd_link_hash_undefined
1593 || ((h->root.type == bfd_link_hash_defined
1594 || h->root.type == bfd_link_hash_defweak)
1595 && h->root.u.def.section == bfd_und_section_ptr))))
1596 {
1597 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1598 if (no_overflow)
1599 return TRUE;
1600 goto convert;
1601 }
1602 tsec = h->root.u.def.section;
1603 }
1604 else
1605 return TRUE;
1606 }
1607
1608   /* Don't convert GOTPCREL relocations against a large section.  */
1609 if (elf_section_data (tsec) != NULL
1610 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1611 return TRUE;
1612
1613 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1614 if (no_overflow)
1615 return TRUE;
1616
1617 convert:
1618 if (opcode == 0xff)
1619 {
1620 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1621 unsigned int nop;
1622 unsigned int disp;
1623 bfd_vma nop_offset;
1624
1625 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1626 R_X86_64_PC32. */
1627 modrm = bfd_get_8 (abfd, contents + roff - 1);
1628 if (modrm == 0x25)
1629 {
1630 /* Convert to "jmp foo nop". */
1631 modrm = 0xe9;
1632 nop = NOP_OPCODE;
1633 nop_offset = irel->r_offset + 3;
1634 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1635 irel->r_offset -= 1;
1636 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1637 }
1638 else
1639 {
1640 struct elf_x86_link_hash_entry *eh
1641 = (struct elf_x86_link_hash_entry *) h;
1642
1643 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1644 is a nop prefix. */
1645 modrm = 0xe8;
1646 /* To support TLS optimization, always use addr32 prefix for
1647 "call *__tls_get_addr@GOTPCREL(%rip)". */
1648 if (eh && eh->tls_get_addr)
1649 {
1650 nop = 0x67;
1651 nop_offset = irel->r_offset - 2;
1652 }
1653 else
1654 {
1655 nop = link_info->call_nop_byte;
1656 if (link_info->call_nop_as_suffix)
1657 {
1658 nop_offset = irel->r_offset + 3;
1659 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1660 irel->r_offset -= 1;
1661 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1662 }
1663 else
1664 nop_offset = irel->r_offset - 2;
1665 }
1666 }
1667 bfd_put_8 (abfd, nop, contents + nop_offset);
1668 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1669 r_type = R_X86_64_PC32;
1670 }
1671 else
1672 {
1673 unsigned int rex;
1674 unsigned int rex_mask = REX_R;
1675
1676 if (r_type == R_X86_64_REX_GOTPCRELX)
1677 rex = bfd_get_8 (abfd, contents + roff - 3);
1678 else
1679 rex = 0;
1680
1681 if (opcode == 0x8b)
1682 {
1683 if (to_reloc_pc32)
1684 {
1685 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1686 "lea foo(%rip), %reg". */
1687 opcode = 0x8d;
1688 r_type = R_X86_64_PC32;
1689 }
1690 else
1691 {
1692 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1693 "mov $foo, %reg". */
1694 opcode = 0xc7;
1695 modrm = bfd_get_8 (abfd, contents + roff - 1);
1696 modrm = 0xc0 | (modrm & 0x38) >> 3;
1697 if ((rex & REX_W) != 0
1698 && ABI_64_P (link_info->output_bfd))
1699 {
1700 /* Keep the REX_W bit in REX byte for LP64. */
1701 r_type = R_X86_64_32S;
1702 goto rewrite_modrm_rex;
1703 }
1704 else
1705 {
1706 /* If the REX_W bit in REX byte isn't needed,
1707 use R_X86_64_32 and clear the W bit to avoid
1708 sign-extend imm32 to imm64. */
1709 r_type = R_X86_64_32;
1710 /* Clear the W bit in REX byte. */
1711 rex_mask |= REX_W;
1712 goto rewrite_modrm_rex;
1713 }
1714 }
1715 }
1716 else
1717 {
1718 /* R_X86_64_PC32 isn't supported. */
1719 if (to_reloc_pc32)
1720 return TRUE;
1721
1722 modrm = bfd_get_8 (abfd, contents + roff - 1);
1723 if (opcode == 0x85)
1724 {
1725 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1726 "test $foo, %reg". */
1727 modrm = 0xc0 | (modrm & 0x38) >> 3;
1728 opcode = 0xf7;
1729 }
1730 else
1731 {
1732 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1733 "binop $foo, %reg". */
1734 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1735 opcode = 0x81;
1736 }
1737
1738 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1739 overflow when sign-extending imm32 to imm64. */
1740 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1741
1742 rewrite_modrm_rex:
1743 bfd_put_8 (abfd, modrm, contents + roff - 1);
1744
1745 if (rex)
1746 {
1747 /* Move the R bit to the B bit in REX byte. */
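/* With a register-direct ModRM the operand register sits in the
   r/m field, which REX.B extends, rather than the reg field, which
   REX.R extends; REX_MASK also clears REX.W when switching to
   R_X86_64_32.  */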
1748 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1749 bfd_put_8 (abfd, rex, contents + roff - 3);
1750 }
1751
1752 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1753 irel->r_addend = 0;
1754 }
1755
1756 bfd_put_8 (abfd, opcode, contents + roff - 2);
1757 }
1758
1759 *r_type_p = r_type;
1760 irel->r_info = htab->r_info (r_symndx,
1761 r_type | R_X86_64_converted_reloc_bit);
1762
1763 *converted = TRUE;
1764
1765 return TRUE;
1766 }
1767
1768 /* Look through the relocs for a section during the first phase, and
1769 calculate needed space in the global offset table, procedure
1770 linkage table, and dynamic reloc sections. */
1771
1772 static bfd_boolean
1773 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1774 asection *sec,
1775 const Elf_Internal_Rela *relocs)
1776 {
1777 struct elf_x86_link_hash_table *htab;
1778 Elf_Internal_Shdr *symtab_hdr;
1779 struct elf_link_hash_entry **sym_hashes;
1780 const Elf_Internal_Rela *rel;
1781 const Elf_Internal_Rela *rel_end;
1782 asection *sreloc;
1783 bfd_byte *contents;
1784 bfd_boolean converted;
1785
1786 if (bfd_link_relocatable (info))
1787 return TRUE;
1788
1789 /* Don't do anything special with non-loaded, non-alloced sections.
1790 In particular, any relocs in such sections should not affect GOT
1791 and PLT reference counting (ie. we don't allow them to create GOT
1792 or PLT entries), there's no possibility or desire to optimize TLS
1793 relocs, and there's not much point in propagating relocs to shared
1794 libs that the dynamic linker won't relocate. */
1795 if ((sec->flags & SEC_ALLOC) == 0)
1796 return TRUE;
1797
1798 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1799 if (htab == NULL)
1800 {
1801 sec->check_relocs_failed = 1;
1802 return FALSE;
1803 }
1804
1805 BFD_ASSERT (is_x86_elf (abfd, htab));
1806
1807 /* Get the section contents. */
1808 if (elf_section_data (sec)->this_hdr.contents != NULL)
1809 contents = elf_section_data (sec)->this_hdr.contents;
1810 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1811 {
1812 sec->check_relocs_failed = 1;
1813 return FALSE;
1814 }
1815
1816 symtab_hdr = &elf_symtab_hdr (abfd);
1817 sym_hashes = elf_sym_hashes (abfd);
1818
1819 converted = FALSE;
1820
1821 sreloc = NULL;
1822
1823 rel_end = relocs + sec->reloc_count;
1824 for (rel = relocs; rel < rel_end; rel++)
1825 {
1826 unsigned int r_type;
1827 unsigned int r_symndx;
1828 struct elf_link_hash_entry *h;
1829 struct elf_x86_link_hash_entry *eh;
1830 Elf_Internal_Sym *isym;
1831 const char *name;
1832 bfd_boolean size_reloc;
1833 bfd_boolean converted_reloc;
1834
1835 r_symndx = htab->r_sym (rel->r_info);
1836 r_type = ELF32_R_TYPE (rel->r_info);
1837
1838 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1839 {
1840 /* xgettext:c-format */
1841 _bfd_error_handler (_("%B: bad symbol index: %d"),
1842 abfd, r_symndx);
1843 goto error_return;
1844 }
1845
1846 if (r_symndx < symtab_hdr->sh_info)
1847 {
1848 /* A local symbol. */
1849 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1850 abfd, r_symndx);
1851 if (isym == NULL)
1852 goto error_return;
1853
1854 /* Check relocation against local STT_GNU_IFUNC symbol. */
1855 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1856 {
1857 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1858 TRUE);
1859 if (h == NULL)
1860 goto error_return;
1861
1862 /* Fake a STT_GNU_IFUNC symbol. */
1863 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1864 isym, NULL);
1865 h->type = STT_GNU_IFUNC;
1866 h->def_regular = 1;
1867 h->ref_regular = 1;
1868 h->forced_local = 1;
1869 h->root.type = bfd_link_hash_defined;
1870 }
1871 else
1872 h = NULL;
1873 }
1874 else
1875 {
1876 isym = NULL;
1877 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1878 while (h->root.type == bfd_link_hash_indirect
1879 || h->root.type == bfd_link_hash_warning)
1880 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1881 }
1882
1883 /* Check invalid x32 relocations. */
1884 if (!ABI_64_P (abfd))
1885 switch (r_type)
1886 {
1887 default:
1888 break;
1889
1890 case R_X86_64_DTPOFF64:
1891 case R_X86_64_TPOFF64:
1892 case R_X86_64_PC64:
1893 case R_X86_64_GOTOFF64:
1894 case R_X86_64_GOT64:
1895 case R_X86_64_GOTPCREL64:
1896 case R_X86_64_GOTPC64:
1897 case R_X86_64_GOTPLT64:
1898 case R_X86_64_PLTOFF64:
1899 {
1900 if (h)
1901 name = h->root.root.string;
1902 else
1903 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1904 NULL);
1905 _bfd_error_handler
1906 /* xgettext:c-format */
1907 (_("%B: relocation %s against symbol `%s' isn't "
1908 "supported in x32 mode"), abfd,
1909 x86_64_elf_howto_table[r_type].name, name);
1910 bfd_set_error (bfd_error_bad_value);
1911 goto error_return;
1912 }
1913 break;
1914 }
1915
1916 if (h != NULL)
1917 {
1918 /* It is referenced by a non-shared object. */
1919 h->ref_regular = 1;
1920 h->root.non_ir_ref_regular = 1;
1921
1922 if (h->type == STT_GNU_IFUNC)
1923 elf_tdata (info->output_bfd)->has_gnu_symbols
1924 |= elf_gnu_symbol_ifunc;
1925 }
1926
1927 converted_reloc = FALSE;
1928 if ((r_type == R_X86_64_GOTPCREL
1929 || r_type == R_X86_64_GOTPCRELX
1930 || r_type == R_X86_64_REX_GOTPCRELX)
1931 && (h == NULL || h->type != STT_GNU_IFUNC))
1932 {
1933 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1934 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1935 irel, h, &converted_reloc,
1936 info))
1937 goto error_return;
1938
1939 if (converted_reloc)
1940 converted = TRUE;
1941 }
1942
1943 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1944 symtab_hdr, sym_hashes,
1945 &r_type, GOT_UNKNOWN,
1946 rel, rel_end, h, r_symndx, FALSE))
1947 goto error_return;
1948
1949 eh = (struct elf_x86_link_hash_entry *) h;
1950 switch (r_type)
1951 {
1952 case R_X86_64_TLSLD:
1953 htab->tls_ld_or_ldm_got.refcount += 1;
1954 goto create_got;
1955
1956 case R_X86_64_TPOFF32:
1957 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1958 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1959 &x86_64_elf_howto_table[r_type]);
1960 if (eh != NULL)
1961 eh->has_got_reloc = 1;
1962 break;
1963
1964 case R_X86_64_GOTTPOFF:
1965 if (!bfd_link_executable (info))
1966 info->flags |= DF_STATIC_TLS;
1967 /* Fall through */
1968
1969 case R_X86_64_GOT32:
1970 case R_X86_64_GOTPCREL:
1971 case R_X86_64_GOTPCRELX:
1972 case R_X86_64_REX_GOTPCRELX:
1973 case R_X86_64_TLSGD:
1974 case R_X86_64_GOT64:
1975 case R_X86_64_GOTPCREL64:
1976 case R_X86_64_GOTPLT64:
1977 case R_X86_64_GOTPC32_TLSDESC:
1978 case R_X86_64_TLSDESC_CALL:
1979 /* This symbol requires a global offset table entry. */
1980 {
1981 int tls_type, old_tls_type;
1982
1983 switch (r_type)
1984 {
1985 default: tls_type = GOT_NORMAL; break;
1986 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1987 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1988 case R_X86_64_GOTPC32_TLSDESC:
1989 case R_X86_64_TLSDESC_CALL:
1990 tls_type = GOT_TLS_GDESC; break;
1991 }
1992
1993 if (h != NULL)
1994 {
1995 h->got.refcount += 1;
1996 old_tls_type = eh->tls_type;
1997 }
1998 else
1999 {
2000 bfd_signed_vma *local_got_refcounts;
2001
2002 /* This is a global offset table entry for a local symbol. */
2003 local_got_refcounts = elf_local_got_refcounts (abfd);
2004 if (local_got_refcounts == NULL)
2005 {
2006 bfd_size_type size;
2007
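/* A single allocation backs three parallel per-local-symbol
   arrays: the GOT reference counts, the TLSDESC GOT offsets and
   the GOT TLS type bytes.  */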
2008 size = symtab_hdr->sh_info;
2009 size *= sizeof (bfd_signed_vma)
2010 + sizeof (bfd_vma) + sizeof (char);
2011 local_got_refcounts = ((bfd_signed_vma *)
2012 bfd_zalloc (abfd, size));
2013 if (local_got_refcounts == NULL)
2014 goto error_return;
2015 elf_local_got_refcounts (abfd) = local_got_refcounts;
2016 elf_x86_local_tlsdesc_gotent (abfd)
2017 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2018 elf_x86_local_got_tls_type (abfd)
2019 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2020 }
2021 local_got_refcounts[r_symndx] += 1;
2022 old_tls_type
2023 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2024 }
2025
2026 /* If a TLS symbol is accessed using IE at least once,
2027 there is no point in using the dynamic model for it. */
2028 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2029 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2030 || tls_type != GOT_TLS_IE))
2031 {
2032 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2033 tls_type = old_tls_type;
2034 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2035 && GOT_TLS_GD_ANY_P (tls_type))
2036 tls_type |= old_tls_type;
2037 else
2038 {
2039 if (h)
2040 name = h->root.root.string;
2041 else
2042 name = bfd_elf_sym_name (abfd, symtab_hdr,
2043 isym, NULL);
2044 _bfd_error_handler
2045 /* xgettext:c-format */
2046 (_("%B: '%s' accessed both as normal and"
2047 " thread local symbol"),
2048 abfd, name);
2049 bfd_set_error (bfd_error_bad_value);
2050 goto error_return;
2051 }
2052 }
2053
2054 if (old_tls_type != tls_type)
2055 {
2056 if (eh != NULL)
2057 eh->tls_type = tls_type;
2058 else
2059 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2060 }
2061 }
2062 /* Fall through */
2063
2064 case R_X86_64_GOTOFF64:
2065 case R_X86_64_GOTPC32:
2066 case R_X86_64_GOTPC64:
2067 create_got:
2068 if (eh != NULL)
2069 eh->has_got_reloc = 1;
2070 break;
2071
2072 case R_X86_64_PLT32:
2073 case R_X86_64_PLT32_BND:
2074 /* This symbol requires a procedure linkage table entry. We
2075 actually build the entry in adjust_dynamic_symbol,
2076 because this might be a case of linking PIC code which is
2077 never referenced by a dynamic object, in which case we
2078 don't need to generate a procedure linkage table entry
2079 after all. */
2080
2081 /* If this is a local symbol, we resolve it directly without
2082 creating a procedure linkage table entry. */
2083 if (h == NULL)
2084 continue;
2085
2086 eh->has_got_reloc = 1;
2087 h->needs_plt = 1;
2088 h->plt.refcount += 1;
2089 break;
2090
2091 case R_X86_64_PLTOFF64:
2092 /* This tries to form the 'address' of a function relative
2093 to GOT. For global symbols we need a PLT entry. */
2094 if (h != NULL)
2095 {
2096 h->needs_plt = 1;
2097 h->plt.refcount += 1;
2098 }
2099 goto create_got;
2100
2101 case R_X86_64_SIZE32:
2102 case R_X86_64_SIZE64:
2103 size_reloc = TRUE;
2104 goto do_size;
2105
2106 case R_X86_64_32:
2107 if (!ABI_64_P (abfd))
2108 goto pointer;
2109 /* Fall through. */
2110 case R_X86_64_8:
2111 case R_X86_64_16:
2112 case R_X86_64_32S:
2113 /* Check relocation overflow as these relocs may lead to
2114 run-time relocation overflow. Don't error out for
2115 sections we don't care about, such as debug sections or
2116 when relocation overflow check is disabled. */
2117 if (!info->no_reloc_overflow_check
2118 && !converted_reloc
2119 && (bfd_link_pic (info)
2120 || (bfd_link_executable (info)
2121 && h != NULL
2122 && !h->def_regular
2123 && h->def_dynamic
2124 && (sec->flags & SEC_READONLY) == 0)))
2125 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2126 &x86_64_elf_howto_table[r_type]);
2127 /* Fall through. */
2128
2129 case R_X86_64_PC8:
2130 case R_X86_64_PC16:
2131 case R_X86_64_PC32:
2132 case R_X86_64_PC32_BND:
2133 case R_X86_64_PC64:
2134 case R_X86_64_64:
2135 pointer:
2136 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2137 eh->has_non_got_reloc = 1;
2138 /* We are called after all symbols have been resolved. Only
2139 relocations against STT_GNU_IFUNC symbols must go through
2140 the PLT. */
2141 if (h != NULL
2142 && (bfd_link_executable (info)
2143 || h->type == STT_GNU_IFUNC))
2144 {
2145 /* If this reloc is in a read-only section, we might
2146 need a copy reloc. We can't check reliably at this
2147 stage whether the section is read-only, as input
2148 sections have not yet been mapped to output sections.
2149 Tentatively set the flag for now, and correct in
2150 adjust_dynamic_symbol. */
2151 h->non_got_ref = 1;
2152
2153 /* We may need a .plt entry if the symbol is a function
2154 defined in a shared lib or is a STT_GNU_IFUNC function
2155 referenced from the code or read-only section. */
2156 if (!h->def_regular
2157 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2158 h->plt.refcount += 1;
2159
2160 if (r_type == R_X86_64_PC32)
2161 {
2162 /* Since something like ".long foo - ." may be used
2163 as pointer, make sure that PLT is used if foo is
2164 a function defined in a shared library. */
2165 if ((sec->flags & SEC_CODE) == 0)
2166 h->pointer_equality_needed = 1;
2167 }
2168 else if (r_type != R_X86_64_PC32_BND
2169 && r_type != R_X86_64_PC64)
2170 {
2171 h->pointer_equality_needed = 1;
2172 /* At run-time, R_X86_64_64 can be resolved for both
2173 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2174 can only be resolved for x32. */
2175 if ((sec->flags & SEC_READONLY) == 0
2176 && (r_type == R_X86_64_64
2177 || (!ABI_64_P (abfd)
2178 && (r_type == R_X86_64_32
2179 || r_type == R_X86_64_32S))))
2180 eh->func_pointer_refcount += 1;
2181 }
2182 }
2183
2184 size_reloc = FALSE;
2185 do_size:
2186 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2187 htab->pointer_r_type))
2188 {
2189 struct elf_dyn_relocs *p;
2190 struct elf_dyn_relocs **head;
2191
2192 /* We must copy these reloc types into the output file.
2193 Create a reloc section in dynobj and make room for
2194 this reloc. */
2195 if (sreloc == NULL)
2196 {
2197 sreloc = _bfd_elf_make_dynamic_reloc_section
2198 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2199 abfd, /*rela?*/ TRUE);
2200
2201 if (sreloc == NULL)
2202 goto error_return;
2203 }
2204
2205 /* If this is a global symbol, we count the number of
2206 relocations we need for this symbol. */
2207 if (h != NULL)
2208 head = &eh->dyn_relocs;
2209 else
2210 {
2211 /* Track dynamic relocs needed for local syms too.
2212 We really need local syms available to do this
2213 easily. Oh well. */
2214 asection *s;
2215 void **vpp;
2216
2217 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2218 abfd, r_symndx);
2219 if (isym == NULL)
2220 goto error_return;
2221
2222 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2223 if (s == NULL)
2224 s = sec;
2225
2226 /* Beware of type punned pointers vs strict aliasing
2227 rules. */
2228 vpp = &(elf_section_data (s)->local_dynrel);
2229 head = (struct elf_dyn_relocs **)vpp;
2230 }
2231
2232 p = *head;
2233 if (p == NULL || p->sec != sec)
2234 {
2235 bfd_size_type amt = sizeof *p;
2236
2237 p = ((struct elf_dyn_relocs *)
2238 bfd_alloc (htab->elf.dynobj, amt));
2239 if (p == NULL)
2240 goto error_return;
2241 p->next = *head;
2242 *head = p;
2243 p->sec = sec;
2244 p->count = 0;
2245 p->pc_count = 0;
2246 }
2247
2248 p->count += 1;
2249 /* Count size relocation as PC-relative relocation. */
2250 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2251 p->pc_count += 1;
2252 }
2253 break;
2254
2255 /* This relocation describes the C++ object vtable hierarchy.
2256 Reconstruct it for later use during GC. */
2257 case R_X86_64_GNU_VTINHERIT:
2258 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2259 goto error_return;
2260 break;
2261
2262 /* This relocation describes which C++ vtable entries are actually
2263 used. Record for later use during GC. */
2264 case R_X86_64_GNU_VTENTRY:
2265 BFD_ASSERT (h != NULL);
2266 if (h != NULL
2267 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2268 goto error_return;
2269 break;
2270
2271 default:
2272 break;
2273 }
2274 }
2275
2276 if (elf_section_data (sec)->this_hdr.contents != contents)
2277 {
2278 if (!converted && !info->keep_memory)
2279 free (contents);
2280 else
2281 {
2282 /* Cache the section contents for elf_link_input_bfd if any
2283 load is converted or --no-keep-memory isn't used. */
2284 elf_section_data (sec)->this_hdr.contents = contents;
2285 }
2286 }
2287
2288 /* Cache relocations if any load is converted. */
2289 if (elf_section_data (sec)->relocs != relocs && converted)
2290 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2291
2292 return TRUE;
2293
2294 error_return:
2295 if (elf_section_data (sec)->this_hdr.contents != contents)
2296 free (contents);
2297 sec->check_relocs_failed = 1;
2298 return FALSE;
2299 }
2300
2301 /* Return the relocation value for @tpoff relocation
2302 if STT_TLS virtual address is ADDRESS. */
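/* x86-64 uses TLS variant II: the thread pointer points just past
   the static TLS block, so @tpoff values are negative offsets from
   %fs:0, i.e. ADDRESS minus the aligned end of the TLS segment.  */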
2303
2304 static bfd_vma
2305 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2306 {
2307 struct elf_link_hash_table *htab = elf_hash_table (info);
2308 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2309 bfd_vma static_tls_size;
2310
2311 /* If tls_segment is NULL, we should have signalled an error already. */
2312 if (htab->tls_sec == NULL)
2313 return 0;
2314
2315 /* Consider special static TLS alignment requirements. */
2316 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2317 return address - static_tls_size - htab->tls_sec->vma;
2318 }
2319
2320 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
2321 branch? */
2322
2323 static bfd_boolean
2324 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2325 {
2326 /* Opcode Instruction
2327 0xe8 call
2328 0xe9 jump
2329 0x0f 0x8x conditional jump */
2330 return ((offset > 0
2331 && (contents [offset - 1] == 0xe8
2332 || contents [offset - 1] == 0xe9))
2333 || (offset > 1
2334 && contents [offset - 2] == 0x0f
2335 && (contents [offset - 1] & 0xf0) == 0x80));
2336 }
2337
2338 /* Relocate an x86_64 ELF section. */
2339
2340 static bfd_boolean
2341 elf_x86_64_relocate_section (bfd *output_bfd,
2342 struct bfd_link_info *info,
2343 bfd *input_bfd,
2344 asection *input_section,
2345 bfd_byte *contents,
2346 Elf_Internal_Rela *relocs,
2347 Elf_Internal_Sym *local_syms,
2348 asection **local_sections)
2349 {
2350 struct elf_x86_link_hash_table *htab;
2351 Elf_Internal_Shdr *symtab_hdr;
2352 struct elf_link_hash_entry **sym_hashes;
2353 bfd_vma *local_got_offsets;
2354 bfd_vma *local_tlsdesc_gotents;
2355 Elf_Internal_Rela *rel;
2356 Elf_Internal_Rela *wrel;
2357 Elf_Internal_Rela *relend;
2358 unsigned int plt_entry_size;
2359
2360 /* Skip if check_relocs failed. */
2361 if (input_section->check_relocs_failed)
2362 return FALSE;
2363
2364 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2365 if (htab == NULL)
2366 return FALSE;
2367
2368 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2369
2370 plt_entry_size = htab->plt.plt_entry_size;
2371 symtab_hdr = &elf_symtab_hdr (input_bfd);
2372 sym_hashes = elf_sym_hashes (input_bfd);
2373 local_got_offsets = elf_local_got_offsets (input_bfd);
2374 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2375
2376 _bfd_x86_elf_set_tls_module_base (info);
2377
2378 rel = wrel = relocs;
2379 relend = relocs + input_section->reloc_count;
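/* WREL trails REL so that relocations dropped against discarded
   sections in relocatable output are squeezed out in place.  */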
2380 for (; rel < relend; wrel++, rel++)
2381 {
2382 unsigned int r_type, r_type_tls;
2383 reloc_howto_type *howto;
2384 unsigned long r_symndx;
2385 struct elf_link_hash_entry *h;
2386 struct elf_x86_link_hash_entry *eh;
2387 Elf_Internal_Sym *sym;
2388 asection *sec;
2389 bfd_vma off, offplt, plt_offset;
2390 bfd_vma relocation;
2391 bfd_boolean unresolved_reloc;
2392 bfd_reloc_status_type r;
2393 int tls_type;
2394 asection *base_got, *resolved_plt;
2395 bfd_vma st_size;
2396 bfd_boolean resolved_to_zero;
2397 bfd_boolean relative_reloc;
2398 bfd_boolean converted_reloc;
2399 bfd_boolean need_copy_reloc_in_pie;
2400
2401 r_type = ELF32_R_TYPE (rel->r_info);
2402 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2403 || r_type == (int) R_X86_64_GNU_VTENTRY)
2404 {
2405 if (wrel != rel)
2406 *wrel = *rel;
2407 continue;
2408 }
2409
2410 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2411 r_type &= ~R_X86_64_converted_reloc_bit;
2412
2413 if (r_type >= (int) R_X86_64_standard)
2414 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2415
2416 if (r_type != (int) R_X86_64_32
2417 || ABI_64_P (output_bfd))
2418 howto = x86_64_elf_howto_table + r_type;
2419 else
2420 howto = (x86_64_elf_howto_table
2421 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2422 r_symndx = htab->r_sym (rel->r_info);
2423 h = NULL;
2424 sym = NULL;
2425 sec = NULL;
2426 unresolved_reloc = FALSE;
2427 if (r_symndx < symtab_hdr->sh_info)
2428 {
2429 sym = local_syms + r_symndx;
2430 sec = local_sections[r_symndx];
2431
2432 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2433 &sec, rel);
2434 st_size = sym->st_size;
2435
2436 /* Relocate against local STT_GNU_IFUNC symbol. */
2437 if (!bfd_link_relocatable (info)
2438 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2439 {
2440 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2441 rel, FALSE);
2442 if (h == NULL)
2443 abort ();
2444
2445 /* Set STT_GNU_IFUNC symbol value. */
2446 h->root.u.def.value = sym->st_value;
2447 h->root.u.def.section = sec;
2448 }
2449 }
2450 else
2451 {
2452 bfd_boolean warned ATTRIBUTE_UNUSED;
2453 bfd_boolean ignored ATTRIBUTE_UNUSED;
2454
2455 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2456 r_symndx, symtab_hdr, sym_hashes,
2457 h, sec, relocation,
2458 unresolved_reloc, warned, ignored);
2459 st_size = h->size;
2460 }
2461
2462 if (sec != NULL && discarded_section (sec))
2463 {
2464 _bfd_clear_contents (howto, input_bfd, input_section,
2465 contents + rel->r_offset);
2466 wrel->r_offset = rel->r_offset;
2467 wrel->r_info = 0;
2468 wrel->r_addend = 0;
2469
2470 /* For ld -r, remove relocations in debug sections against
2471 sections defined in discarded sections. Not done for
2472 eh_frame, because the editing code expects relocs to be present. */
2473 if (bfd_link_relocatable (info)
2474 && (input_section->flags & SEC_DEBUGGING))
2475 wrel--;
2476
2477 continue;
2478 }
2479
2480 if (bfd_link_relocatable (info))
2481 {
2482 if (wrel != rel)
2483 *wrel = *rel;
2484 continue;
2485 }
2486
2487 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2488 {
2489 if (r_type == R_X86_64_64)
2490 {
2491 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2492 zero-extend it to 64bit if addend is zero. */
2493 r_type = R_X86_64_32;
2494 memset (contents + rel->r_offset + 4, 0, 4);
2495 }
2496 else if (r_type == R_X86_64_SIZE64)
2497 {
2498 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2499 zero-extend it to 64bit if addend is zero. */
2500 r_type = R_X86_64_SIZE32;
2501 memset (contents + rel->r_offset + 4, 0, 4);
2502 }
2503 }
2504
2505 eh = (struct elf_x86_link_hash_entry *) h;
2506
2507 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2508 it here if it is defined in a non-shared object. */
2509 if (h != NULL
2510 && h->type == STT_GNU_IFUNC
2511 && h->def_regular)
2512 {
2513 bfd_vma plt_index;
2514 const char *name;
2515
2516 if ((input_section->flags & SEC_ALLOC) == 0)
2517 {
2518 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2519 sections because such sections are not SEC_ALLOC and
2520 thus ld.so will not process them. */
2521 if ((input_section->flags & SEC_DEBUGGING) != 0)
2522 continue;
2523 abort ();
2524 }
2525
2526 switch (r_type)
2527 {
2528 default:
2529 break;
2530
2531 case R_X86_64_GOTPCREL:
2532 case R_X86_64_GOTPCRELX:
2533 case R_X86_64_REX_GOTPCRELX:
2534 case R_X86_64_GOTPCREL64:
2535 base_got = htab->elf.sgot;
2536 off = h->got.offset;
2537
2538 if (base_got == NULL)
2539 abort ();
2540
2541 if (off == (bfd_vma) -1)
2542 {
2543 /* We can't use h->got.offset here to save state, or
2544 even just remember the offset, as finish_dynamic_symbol
2545 would use that as offset into .got. */
2546
2547 if (h->plt.offset == (bfd_vma) -1)
2548 abort ();
2549
2550 if (htab->elf.splt != NULL)
2551 {
2552 plt_index = (h->plt.offset / plt_entry_size
2553 - htab->plt.has_plt0);
2554 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2555 base_got = htab->elf.sgotplt;
2556 }
2557 else
2558 {
2559 plt_index = h->plt.offset / plt_entry_size;
2560 off = plt_index * GOT_ENTRY_SIZE;
2561 base_got = htab->elf.igotplt;
2562 }
2563
2564 if (h->dynindx == -1
2565 || h->forced_local
2566 || info->symbolic)
2567 {
2568 /* This references the local definition. We must
2569 initialize this entry in the global offset table.
2570 Since the offset must always be a multiple of 8,
2571 we use the least significant bit to record
2572 whether we have initialized it already.
2573
2574 When doing a dynamic link, we create a .rela.got
2575 relocation entry to initialize the value. This
2576 is done in the finish_dynamic_symbol routine. */
2577 if ((off & 1) != 0)
2578 off &= ~1;
2579 else
2580 {
2581 bfd_put_64 (output_bfd, relocation,
2582 base_got->contents + off);
2583 /* Note that this is harmless for the GOTPLT64
2584 case, as -1 | 1 still is -1. */
2585 h->got.offset |= 1;
2586 }
2587 }
2588 }
2589
2590 relocation = (base_got->output_section->vma
2591 + base_got->output_offset + off);
2592
2593 goto do_relocation;
2594 }
2595
2596 if (h->plt.offset == (bfd_vma) -1)
2597 {
2598 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2599 if (r_type == htab->pointer_r_type
2600 && (input_section->flags & SEC_CODE) == 0)
2601 goto do_ifunc_pointer;
2602 goto bad_ifunc_reloc;
2603 }
2604
2605 /* STT_GNU_IFUNC symbol must go through PLT. */
2606 if (htab->elf.splt != NULL)
2607 {
2608 if (htab->plt_second != NULL)
2609 {
2610 resolved_plt = htab->plt_second;
2611 plt_offset = eh->plt_second.offset;
2612 }
2613 else
2614 {
2615 resolved_plt = htab->elf.splt;
2616 plt_offset = h->plt.offset;
2617 }
2618 }
2619 else
2620 {
2621 resolved_plt = htab->elf.iplt;
2622 plt_offset = h->plt.offset;
2623 }
2624
2625 relocation = (resolved_plt->output_section->vma
2626 + resolved_plt->output_offset + plt_offset);
2627
2628 switch (r_type)
2629 {
2630 default:
2631 bad_ifunc_reloc:
2632 if (h->root.root.string)
2633 name = h->root.root.string;
2634 else
2635 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2636 NULL);
2637 _bfd_error_handler
2638 /* xgettext:c-format */
2639 (_("%B: relocation %s against STT_GNU_IFUNC "
2640 "symbol `%s' isn't supported"), input_bfd,
2641 howto->name, name);
2642 bfd_set_error (bfd_error_bad_value);
2643 return FALSE;
2644
2645 case R_X86_64_32S:
2646 if (bfd_link_pic (info))
2647 abort ();
2648 goto do_relocation;
2649
2650 case R_X86_64_32:
2651 if (ABI_64_P (output_bfd))
2652 goto do_relocation;
2653 /* FALLTHROUGH */
2654 case R_X86_64_64:
2655 do_ifunc_pointer:
2656 if (rel->r_addend != 0)
2657 {
2658 if (h->root.root.string)
2659 name = h->root.root.string;
2660 else
2661 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2662 sym, NULL);
2663 _bfd_error_handler
2664 /* xgettext:c-format */
2665 (_("%B: relocation %s against STT_GNU_IFUNC "
2666 "symbol `%s' has non-zero addend: %Ld"),
2667 input_bfd, howto->name, name, rel->r_addend);
2668 bfd_set_error (bfd_error_bad_value);
2669 return FALSE;
2670 }
2671
2672 /* Generate dynamic relocation only when there is a
2673 non-GOT reference in a shared object or there is no
2674 PLT. */
2675 if ((bfd_link_pic (info) && h->non_got_ref)
2676 || h->plt.offset == (bfd_vma) -1)
2677 {
2678 Elf_Internal_Rela outrel;
2679 asection *sreloc;
2680
2681 /* Need a dynamic relocation to get the real function
2682 address. */
2683 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2684 info,
2685 input_section,
2686 rel->r_offset);
2687 if (outrel.r_offset == (bfd_vma) -1
2688 || outrel.r_offset == (bfd_vma) -2)
2689 abort ();
2690
2691 outrel.r_offset += (input_section->output_section->vma
2692 + input_section->output_offset);
2693
2694 if (POINTER_LOCAL_IFUNC_P (info, h))
2695 {
2696 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2697 h->root.root.string,
2698 h->root.u.def.section->owner);
2699
2700 /* This symbol is resolved locally. */
2701 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2702 outrel.r_addend = (h->root.u.def.value
2703 + h->root.u.def.section->output_section->vma
2704 + h->root.u.def.section->output_offset);
2705 }
2706 else
2707 {
2708 outrel.r_info = htab->r_info (h->dynindx, r_type);
2709 outrel.r_addend = 0;
2710 }
2711
2712 /* Dynamic relocations are stored in
2713 1. .rela.ifunc section in PIC object.
2714 2. .rela.got section in dynamic executable.
2715 3. .rela.iplt section in static executable. */
2716 if (bfd_link_pic (info))
2717 sreloc = htab->elf.irelifunc;
2718 else if (htab->elf.splt != NULL)
2719 sreloc = htab->elf.srelgot;
2720 else
2721 sreloc = htab->elf.irelplt;
2722 elf_append_rela (output_bfd, sreloc, &outrel);
2723
2724 /* If this reloc is against an external symbol, we
2725 do not want to fiddle with the addend. Otherwise,
2726 we need to include the symbol value so that it
2727 becomes an addend for the dynamic reloc. For an
2728 internal symbol, we have updated addend. */
2729 continue;
2730 }
2731 /* FALLTHROUGH */
2732 case R_X86_64_PC32:
2733 case R_X86_64_PC32_BND:
2734 case R_X86_64_PC64:
2735 case R_X86_64_PLT32:
2736 case R_X86_64_PLT32_BND:
2737 goto do_relocation;
2738 }
2739 }
2740
2741 resolved_to_zero = (eh != NULL
2742 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2743
2744 /* When generating a shared object, the relocations handled here are
2745 copied into the output file to be resolved at run time. */
2746 switch (r_type)
2747 {
2748 case R_X86_64_GOT32:
2749 case R_X86_64_GOT64:
2750 /* Relocation is to the entry for this symbol in the global
2751 offset table. */
2752 case R_X86_64_GOTPCREL:
2753 case R_X86_64_GOTPCRELX:
2754 case R_X86_64_REX_GOTPCRELX:
2755 case R_X86_64_GOTPCREL64:
2756 /* Use global offset table entry as symbol value. */
2757 case R_X86_64_GOTPLT64:
2758 /* This is obsolete and treated the same as GOT64. */
2759 base_got = htab->elf.sgot;
2760
2761 if (htab->elf.sgot == NULL)
2762 abort ();
2763
2764 relative_reloc = FALSE;
2765 if (h != NULL)
2766 {
2767 off = h->got.offset;
2768 if (h->needs_plt
2769 && h->plt.offset != (bfd_vma)-1
2770 && off == (bfd_vma)-1)
2771 {
2772 /* We can't use h->got.offset here to save
2773 state, or even just remember the offset, as
2774 finish_dynamic_symbol would use that as offset into
2775 .got. */
2776 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2777 - htab->plt.has_plt0);
2778 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2779 base_got = htab->elf.sgotplt;
2780 }
2781
2782 if (RESOLVED_LOCALLY_P (info, h, htab))
2783 {
2784 /* We must initialize this entry in the global offset
2785 table. Since the offset must always be a multiple
2786 of 8, we use the least significant bit to record
2787 whether we have initialized it already.
2788
2789 When doing a dynamic link, we create a .rela.got
2790 relocation entry to initialize the value. This is
2791 done in the finish_dynamic_symbol routine. */
2792 if ((off & 1) != 0)
2793 off &= ~1;
2794 else
2795 {
2796 bfd_put_64 (output_bfd, relocation,
2797 base_got->contents + off);
2798 /* Note that this is harmless for the GOTPLT64 case,
2799 as -1 | 1 still is -1. */
2800 h->got.offset |= 1;
2801
2802 if (GENERATE_RELATIVE_RELOC_P (info, h))
2803 {
2804 /* If this symbol isn't dynamic in PIC,
2805 generate R_X86_64_RELATIVE here. */
2806 eh->no_finish_dynamic_symbol = 1;
2807 relative_reloc = TRUE;
2808 }
2809 }
2810 }
2811 else
2812 unresolved_reloc = FALSE;
2813 }
2814 else
2815 {
2816 if (local_got_offsets == NULL)
2817 abort ();
2818
2819 off = local_got_offsets[r_symndx];
2820
2821 /* The offset must always be a multiple of 8. We use
2822 the least significant bit to record whether we have
2823 already generated the necessary reloc. */
2824 if ((off & 1) != 0)
2825 off &= ~1;
2826 else
2827 {
2828 bfd_put_64 (output_bfd, relocation,
2829 base_got->contents + off);
2830 local_got_offsets[r_symndx] |= 1;
2831
2832 if (bfd_link_pic (info))
2833 relative_reloc = TRUE;
2834 }
2835 }
2836
2837 if (relative_reloc)
2838 {
2839 asection *s;
2840 Elf_Internal_Rela outrel;
2841
2842 /* We need to generate a R_X86_64_RELATIVE reloc
2843 for the dynamic linker. */
2844 s = htab->elf.srelgot;
2845 if (s == NULL)
2846 abort ();
2847
2848 outrel.r_offset = (base_got->output_section->vma
2849 + base_got->output_offset
2850 + off);
2851 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2852 outrel.r_addend = relocation;
2853 elf_append_rela (output_bfd, s, &outrel);
2854 }
2855
2856 if (off >= (bfd_vma) -2)
2857 abort ();
2858
2859 relocation = base_got->output_section->vma
2860 + base_got->output_offset + off;
2861 if (r_type != R_X86_64_GOTPCREL
2862 && r_type != R_X86_64_GOTPCRELX
2863 && r_type != R_X86_64_REX_GOTPCRELX
2864 && r_type != R_X86_64_GOTPCREL64)
2865 relocation -= htab->elf.sgotplt->output_section->vma
2866 - htab->elf.sgotplt->output_offset;
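/* R_X86_64_GOT32/GOT64/GOTPLT64 want the offset of the GOT slot
   from the GOT base (_GLOBAL_OFFSET_TABLE_, which points at
   .got.plt), while the GOTPCREL flavours above keep the slot's
   address for PC-relative use.  */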
2867
2868 break;
2869
2870 case R_X86_64_GOTOFF64:
2871 /* Relocation is relative to the start of the global offset
2872 table. */
2873
2874 /* Check to make sure it isn't a protected function or data
2875 symbol in a shared library, since it may not be local when
2876 used as a function address or with copy relocation. We also
2877 need to make sure that the symbol is referenced locally. */
2878 if (bfd_link_pic (info) && h)
2879 {
2880 if (!h->def_regular)
2881 {
2882 const char *v;
2883
2884 switch (ELF_ST_VISIBILITY (h->other))
2885 {
2886 case STV_HIDDEN:
2887 v = _("hidden symbol");
2888 break;
2889 case STV_INTERNAL:
2890 v = _("internal symbol");
2891 break;
2892 case STV_PROTECTED:
2893 v = _("protected symbol");
2894 break;
2895 default:
2896 v = _("symbol");
2897 break;
2898 }
2899
2900 _bfd_error_handler
2901 /* xgettext:c-format */
2902 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2903 " `%s' can not be used when making a shared object"),
2904 input_bfd, v, h->root.root.string);
2905 bfd_set_error (bfd_error_bad_value);
2906 return FALSE;
2907 }
2908 else if (!bfd_link_executable (info)
2909 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2910 && (h->type == STT_FUNC
2911 || h->type == STT_OBJECT)
2912 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2913 {
2914 _bfd_error_handler
2915 /* xgettext:c-format */
2916 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2917 " `%s' can not be used when making a shared object"),
2918 input_bfd,
2919 h->type == STT_FUNC ? "function" : "data",
2920 h->root.root.string);
2921 bfd_set_error (bfd_error_bad_value);
2922 return FALSE;
2923 }
2924 }
2925
2926 /* Note that sgot is not involved in this
2927 calculation. We always want the start of .got.plt. If we
2928 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2929 permitted by the ABI, we might have to change this
2930 calculation. */
2931 relocation -= htab->elf.sgotplt->output_section->vma
2932 + htab->elf.sgotplt->output_offset;
2933 break;
2934
2935 case R_X86_64_GOTPC32:
2936 case R_X86_64_GOTPC64:
2937 /* Use global offset table as symbol value. */
2938 relocation = htab->elf.sgotplt->output_section->vma
2939 + htab->elf.sgotplt->output_offset;
2940 unresolved_reloc = FALSE;
2941 break;
2942
2943 case R_X86_64_PLTOFF64:
2944 /* Relocation is PLT entry relative to GOT. For local
2945 symbols it's the symbol itself relative to GOT. */
2946 if (h != NULL
2947 /* See PLT32 handling. */
2948 && (h->plt.offset != (bfd_vma) -1
2949 || eh->plt_got.offset != (bfd_vma) -1)
2950 && htab->elf.splt != NULL)
2951 {
2952 if (eh->plt_got.offset != (bfd_vma) -1)
2953 {
2954 /* Use the GOT PLT. */
2955 resolved_plt = htab->plt_got;
2956 plt_offset = eh->plt_got.offset;
2957 }
2958 else if (htab->plt_second != NULL)
2959 {
2960 resolved_plt = htab->plt_second;
2961 plt_offset = eh->plt_second.offset;
2962 }
2963 else
2964 {
2965 resolved_plt = htab->elf.splt;
2966 plt_offset = h->plt.offset;
2967 }
2968
2969 relocation = (resolved_plt->output_section->vma
2970 + resolved_plt->output_offset
2971 + plt_offset);
2972 unresolved_reloc = FALSE;
2973 }
2974
2975 relocation -= htab->elf.sgotplt->output_section->vma
2976 + htab->elf.sgotplt->output_offset;
2977 break;
2978
2979 case R_X86_64_PLT32:
2980 case R_X86_64_PLT32_BND:
2981 /* Relocation is to the entry for this symbol in the
2982 procedure linkage table. */
2983
2984 /* Resolve a PLT32 reloc against a local symbol directly,
2985 without using the procedure linkage table. */
2986 if (h == NULL)
2987 break;
2988
2989 if ((h->plt.offset == (bfd_vma) -1
2990 && eh->plt_got.offset == (bfd_vma) -1)
2991 || htab->elf.splt == NULL)
2992 {
2993 /* We didn't make a PLT entry for this symbol. This
2994 happens when statically linking PIC code, or when
2995 using -Bsymbolic. */
2996 break;
2997 }
2998
2999 if (h->plt.offset != (bfd_vma) -1)
3000 {
3001 if (htab->plt_second != NULL)
3002 {
3003 resolved_plt = htab->plt_second;
3004 plt_offset = eh->plt_second.offset;
3005 }
3006 else
3007 {
3008 resolved_plt = htab->elf.splt;
3009 plt_offset = h->plt.offset;
3010 }
3011 }
3012 else
3013 {
3014 /* Use the GOT PLT. */
3015 resolved_plt = htab->plt_got;
3016 plt_offset = eh->plt_got.offset;
3017 }
3018
3019 relocation = (resolved_plt->output_section->vma
3020 + resolved_plt->output_offset
3021 + plt_offset);
3022 unresolved_reloc = FALSE;
3023 break;
3024
3025 case R_X86_64_SIZE32:
3026 case R_X86_64_SIZE64:
3027 /* Set to symbol size. */
3028 relocation = st_size;
3029 goto direct;
3030
3031 case R_X86_64_PC8:
3032 case R_X86_64_PC16:
3033 case R_X86_64_PC32:
3034 case R_X86_64_PC32_BND:
3035 /* Don't complain about -fPIC if the symbol is undefined when
3036 building an executable, unless it is an unresolved weak symbol
3037 or -z nocopyreloc is used. */
3038 if ((input_section->flags & SEC_ALLOC) != 0
3039 && (input_section->flags & SEC_READONLY) != 0
3040 && h != NULL
3041 && ((bfd_link_executable (info)
3042 && ((h->root.type == bfd_link_hash_undefweak
3043 && !resolved_to_zero)
3044 || ((info->nocopyreloc
3045 || (eh->def_protected
3046 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3047 && h->def_dynamic
3048 && !(h->root.u.def.section->flags & SEC_CODE))))
3049 || bfd_link_dll (info)))
3050 {
3051 bfd_boolean fail = FALSE;
3052 bfd_boolean branch
3053 = ((r_type == R_X86_64_PC32
3054 || r_type == R_X86_64_PC32_BND)
3055 && is_32bit_relative_branch (contents, rel->r_offset));
3056
3057 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3058 {
3059 /* Symbol is referenced locally. Make sure it is
3060 defined locally or for a branch. */
3061 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3062 && !branch);
3063 }
3064 else if (!(bfd_link_pie (info)
3065 && (h->needs_copy || eh->needs_copy)))
3066 {
3067 /* Symbol doesn't need copy reloc and isn't referenced
3068 locally. We only allow branch to symbol with
3069 non-default visibility. */
3070 fail = (!branch
3071 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3072 }
3073
3074 if (fail)
3075 return elf_x86_64_need_pic (info, input_bfd, input_section,
3076 h, NULL, NULL, howto);
3077 }
3078 /* Fall through. */
3079
3080 case R_X86_64_8:
3081 case R_X86_64_16:
3082 case R_X86_64_32:
3083 case R_X86_64_PC64:
3084 case R_X86_64_64:
3085 /* FIXME: The ABI says the linker should make sure the value is
3086 the same when it's zero-extended to 64 bits. */
3087
3088 direct:
3089 if ((input_section->flags & SEC_ALLOC) == 0)
3090 break;
3091
3092 need_copy_reloc_in_pie = (bfd_link_pie (info)
3093 && h != NULL
3094 && (h->needs_copy
3095 || eh->needs_copy
3096 || (h->root.type
3097 == bfd_link_hash_undefined))
3098 && (X86_PCREL_TYPE_P (r_type)
3099 || X86_SIZE_TYPE_P (r_type)));
3100
3101 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3102 need_copy_reloc_in_pie,
3103 resolved_to_zero, FALSE))
3104 {
3105 Elf_Internal_Rela outrel;
3106 bfd_boolean skip, relocate;
3107 asection *sreloc;
3108
3109 /* When generating a shared object, these relocations
3110 are copied into the output file to be resolved at run
3111 time. */
3112 skip = FALSE;
3113 relocate = FALSE;
3114
3115 outrel.r_offset =
3116 _bfd_elf_section_offset (output_bfd, info, input_section,
3117 rel->r_offset);
3118 if (outrel.r_offset == (bfd_vma) -1)
3119 skip = TRUE;
3120 else if (outrel.r_offset == (bfd_vma) -2)
3121 skip = TRUE, relocate = TRUE;
3122
3123 outrel.r_offset += (input_section->output_section->vma
3124 + input_section->output_offset);
3125
3126 if (skip)
3127 memset (&outrel, 0, sizeof outrel);
3128
3129 /* h->dynindx may be -1 if this symbol was marked to
3130 become local. */
3131 else if (h != NULL
3132 && h->dynindx != -1
3133 && (X86_PCREL_TYPE_P (r_type)
3134 || !(bfd_link_executable (info)
3135 || SYMBOLIC_BIND (info, h))
3136 || ! h->def_regular))
3137 {
3138 outrel.r_info = htab->r_info (h->dynindx, r_type);
3139 outrel.r_addend = rel->r_addend;
3140 }
3141 else
3142 {
3143 /* This symbol is local, or marked to become local.
3144 When relocation overflow check is disabled, we
3145 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3146 if (r_type == htab->pointer_r_type
3147 || (r_type == R_X86_64_32
3148 && info->no_reloc_overflow_check))
3149 {
3150 relocate = TRUE;
3151 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3152 outrel.r_addend = relocation + rel->r_addend;
3153 }
3154 else if (r_type == R_X86_64_64
3155 && !ABI_64_P (output_bfd))
3156 {
3157 relocate = TRUE;
3158 outrel.r_info = htab->r_info (0,
3159 R_X86_64_RELATIVE64);
3160 outrel.r_addend = relocation + rel->r_addend;
3161 /* Check addend overflow. */
3162 if ((outrel.r_addend & 0x80000000)
3163 != (rel->r_addend & 0x80000000))
3164 {
3165 const char *name;
3166 int addend = rel->r_addend;
3167 if (h && h->root.root.string)
3168 name = h->root.root.string;
3169 else
3170 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3171 sym, NULL);
3172 _bfd_error_handler
3173 /* xgettext:c-format */
3174 (_("%B: addend %s%#x in relocation %s against "
3175 "symbol `%s' at %#Lx in section `%A' is "
3176 "out of range"),
3177 input_bfd, addend < 0 ? "-" : "", addend,
3178 howto->name, name, rel->r_offset, input_section);
3179 bfd_set_error (bfd_error_bad_value);
3180 return FALSE;
3181 }
3182 }
3183 else
3184 {
3185 long sindx;
3186
3187 if (bfd_is_abs_section (sec))
3188 sindx = 0;
3189 else if (sec == NULL || sec->owner == NULL)
3190 {
3191 bfd_set_error (bfd_error_bad_value);
3192 return FALSE;
3193 }
3194 else
3195 {
3196 asection *osec;
3197
3198 /* We are turning this relocation into one
3199 against a section symbol. It would be
3200 proper to subtract the symbol's value,
3201 osec->vma, from the emitted reloc addend,
3202 but ld.so expects buggy relocs. */
3203 osec = sec->output_section;
3204 sindx = elf_section_data (osec)->dynindx;
3205 if (sindx == 0)
3206 {
3207 asection *oi = htab->elf.text_index_section;
3208 sindx = elf_section_data (oi)->dynindx;
3209 }
3210 BFD_ASSERT (sindx != 0);
3211 }
3212
3213 outrel.r_info = htab->r_info (sindx, r_type);
3214 outrel.r_addend = relocation + rel->r_addend;
3215 }
3216 }
3217
3218 sreloc = elf_section_data (input_section)->sreloc;
3219
3220 if (sreloc == NULL || sreloc->contents == NULL)
3221 {
3222 r = bfd_reloc_notsupported;
3223 goto check_relocation_error;
3224 }
3225
3226 elf_append_rela (output_bfd, sreloc, &outrel);
3227
3228 /* If this reloc is against an external symbol, we do
3229 not want to fiddle with the addend. Otherwise, we
3230 need to include the symbol value so that it becomes
3231 an addend for the dynamic reloc. */
3232 if (! relocate)
3233 continue;
3234 }
3235
3236 break;
3237
3238 case R_X86_64_TLSGD:
3239 case R_X86_64_GOTPC32_TLSDESC:
3240 case R_X86_64_TLSDESC_CALL:
3241 case R_X86_64_GOTTPOFF:
3242 tls_type = GOT_UNKNOWN;
3243 if (h == NULL && local_got_offsets)
3244 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3245 else if (h != NULL)
3246 tls_type = elf_x86_hash_entry (h)->tls_type;
3247
3248 r_type_tls = r_type;
3249 if (! elf_x86_64_tls_transition (info, input_bfd,
3250 input_section, contents,
3251 symtab_hdr, sym_hashes,
3252 &r_type_tls, tls_type, rel,
3253 relend, h, r_symndx, TRUE))
3254 return FALSE;
3255
3256 if (r_type_tls == R_X86_64_TPOFF32)
3257 {
3258 bfd_vma roff = rel->r_offset;
3259
3260 BFD_ASSERT (! unresolved_reloc);
3261
3262 if (r_type == R_X86_64_TLSGD)
3263 {
3264 /* GD->LE transition. For 64bit, change
3265 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3266 .word 0x6666; rex64; call __tls_get_addr@PLT
3267 or
3268 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3269 .byte 0x66; rex64
3270 call *__tls_get_addr@GOTPCREL(%rip)
3271 which may be converted to
3272 addr32 call __tls_get_addr
3273 into:
3274 movq %fs:0, %rax
3275 leaq foo@tpoff(%rax), %rax
3276 For 32bit, change
3277 leaq foo@tlsgd(%rip), %rdi
3278 .word 0x6666; rex64; call __tls_get_addr@PLT
3279 or
3280 leaq foo@tlsgd(%rip), %rdi
3281 .byte 0x66; rex64
3282 call *__tls_get_addr@GOTPCREL(%rip)
3283 which may be converted to
3284 addr32 call __tls_get_addr
3285 into:
3286 movl %fs:0, %eax
3287 leaq foo@tpoff(%rax), %rax
3288 For largepic, change:
3289 leaq foo@tlsgd(%rip), %rdi
3290 movabsq $__tls_get_addr@pltoff, %rax
3291 addq %r15, %rax
3292 call *%rax
3293 into:
3294 movq %fs:0, %rax
3295 leaq foo@tpoff(%rax), %rax
3296 nopw 0x0(%rax,%rax,1) */
3297 int largepic = 0;
3298 if (ABI_64_P (output_bfd))
3299 {
3300 if (contents[roff + 5] == 0xb8)
3301 {
3302 memcpy (contents + roff - 3,
3303 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3304 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3305 largepic = 1;
3306 }
3307 else
3308 memcpy (contents + roff - 4,
3309 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3310 16);
3311 }
3312 else
3313 memcpy (contents + roff - 3,
3314 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3315 15);
3316 bfd_put_32 (output_bfd,
3317 elf_x86_64_tpoff (info, relocation),
3318 contents + roff + 8 + largepic);
3319 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3320 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3321 rel++;
3322 wrel++;
3323 continue;
3324 }
3325 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3326 {
3327 /* GDesc -> LE transition.
3328 It's originally something like:
3329 leaq x@tlsdesc(%rip), %rax
3330
3331 Change it to:
3332 movl $x@tpoff, %rax. */
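/* Both forms are 7 bytes (REX prefix, opcode, ModRM, 32-bit
   operand), so the rewrite is done in place; the original REX.R
   bit becomes REX.B because the register moves into the ModRM
   r/m field.  */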
3333
3334 unsigned int val, type;
3335
3336 type = bfd_get_8 (input_bfd, contents + roff - 3);
3337 val = bfd_get_8 (input_bfd, contents + roff - 1);
3338 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3339 contents + roff - 3);
3340 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3341 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3342 contents + roff - 1);
3343 bfd_put_32 (output_bfd,
3344 elf_x86_64_tpoff (info, relocation),
3345 contents + roff);
3346 continue;
3347 }
3348 else if (r_type == R_X86_64_TLSDESC_CALL)
3349 {
3350 /* GDesc -> LE transition.
3351 It's originally:
3352 call *(%rax)
3353 Turn it into:
3354 xchg %ax,%ax. */
3355 bfd_put_8 (output_bfd, 0x66, contents + roff);
3356 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3357 continue;
3358 }
3359 else if (r_type == R_X86_64_GOTTPOFF)
3360 {
3361 /* IE->LE transition:
3362 For 64bit, originally it can be one of:
3363 movq foo@gottpoff(%rip), %reg
3364 addq foo@gottpoff(%rip), %reg
3365 We change it into:
3366 movq $foo, %reg
3367 leaq foo(%reg), %reg
3368 addq $foo, %reg.
3369 For 32bit, originally it can be one of:
3370 movq foo@gottpoff(%rip), %reg
3371 addl foo@gottpoff(%rip), %reg
3372 We change it into:
3373 movq $foo, %reg
3374 leal foo(%reg), %reg
3375 addl $foo, %reg. */
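/* The REX prefix is adjusted along with ModRM: when the register
   moves from the ModRM reg field to the r/m field, REX.R becomes
   REX.B (0x4c -> 0x49, 0x44 -> 0x41), while the lea form needs the
   register in both fields and so gets 0x4d/0x45.  */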
3376
3377 unsigned int val, type, reg;
3378
3379 if (roff >= 3)
3380 val = bfd_get_8 (input_bfd, contents + roff - 3);
3381 else
3382 val = 0;
3383 type = bfd_get_8 (input_bfd, contents + roff - 2);
3384 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3385 reg >>= 3;
3386 if (type == 0x8b)
3387 {
3388 /* movq */
3389 if (val == 0x4c)
3390 bfd_put_8 (output_bfd, 0x49,
3391 contents + roff - 3);
3392 else if (!ABI_64_P (output_bfd) && val == 0x44)
3393 bfd_put_8 (output_bfd, 0x41,
3394 contents + roff - 3);
3395 bfd_put_8 (output_bfd, 0xc7,
3396 contents + roff - 2);
3397 bfd_put_8 (output_bfd, 0xc0 | reg,
3398 contents + roff - 1);
3399 }
3400 else if (reg == 4)
3401 {
3402 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3403 is special */
3404 if (val == 0x4c)
3405 bfd_put_8 (output_bfd, 0x49,
3406 contents + roff - 3);
3407 else if (!ABI_64_P (output_bfd) && val == 0x44)
3408 bfd_put_8 (output_bfd, 0x41,
3409 contents + roff - 3);
3410 bfd_put_8 (output_bfd, 0x81,
3411 contents + roff - 2);
3412 bfd_put_8 (output_bfd, 0xc0 | reg,
3413 contents + roff - 1);
3414 }
3415 else
3416 {
3417 /* addq/addl -> leaq/leal */
3418 if (val == 0x4c)
3419 bfd_put_8 (output_bfd, 0x4d,
3420 contents + roff - 3);
3421 else if (!ABI_64_P (output_bfd) && val == 0x44)
3422 bfd_put_8 (output_bfd, 0x45,
3423 contents + roff - 3);
3424 bfd_put_8 (output_bfd, 0x8d,
3425 contents + roff - 2);
3426 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3427 contents + roff - 1);
3428 }
3429 bfd_put_32 (output_bfd,
3430 elf_x86_64_tpoff (info, relocation),
3431 contents + roff);
3432 continue;
3433 }
3434 else
3435 BFD_ASSERT (FALSE);
3436 }
3437
3438 if (htab->elf.sgot == NULL)
3439 abort ();
3440
3441 if (h != NULL)
3442 {
3443 off = h->got.offset;
3444 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3445 }
3446 else
3447 {
3448 if (local_got_offsets == NULL)
3449 abort ();
3450
3451 off = local_got_offsets[r_symndx];
3452 offplt = local_tlsdesc_gotents[r_symndx];
3453 }
3454
3455 if ((off & 1) != 0)
3456 off &= ~1;
3457 else
3458 {
3459 Elf_Internal_Rela outrel;
3460 int dr_type, indx;
3461 asection *sreloc;
3462
3463 if (htab->elf.srelgot == NULL)
3464 abort ();
3465
3466 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3467
3468 if (GOT_TLS_GDESC_P (tls_type))
3469 {
3470 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3471 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3472 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3473 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3474 + htab->elf.sgotplt->output_offset
3475 + offplt
3476 + htab->sgotplt_jump_table_size);
3477 sreloc = htab->elf.srelplt;
3478 if (indx == 0)
3479 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3480 else
3481 outrel.r_addend = 0;
3482 elf_append_rela (output_bfd, sreloc, &outrel);
3483 }
3484
3485 sreloc = htab->elf.srelgot;
3486
3487 outrel.r_offset = (htab->elf.sgot->output_section->vma
3488 + htab->elf.sgot->output_offset + off);
3489
3490 if (GOT_TLS_GD_P (tls_type))
3491 dr_type = R_X86_64_DTPMOD64;
3492 else if (GOT_TLS_GDESC_P (tls_type))
3493 goto dr_done;
3494 else
3495 dr_type = R_X86_64_TPOFF64;
3496
3497 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3498 outrel.r_addend = 0;
3499 if ((dr_type == R_X86_64_TPOFF64
3500 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3501 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3502 outrel.r_info = htab->r_info (indx, dr_type);
3503
3504 elf_append_rela (output_bfd, sreloc, &outrel);
3505
3506 if (GOT_TLS_GD_P (tls_type))
3507 {
3508 if (indx == 0)
3509 {
3510 BFD_ASSERT (! unresolved_reloc);
3511 bfd_put_64 (output_bfd,
3512 relocation - _bfd_x86_elf_dtpoff_base (info),
3513 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3514 }
3515 else
3516 {
3517 bfd_put_64 (output_bfd, 0,
3518 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3519 outrel.r_info = htab->r_info (indx,
3520 R_X86_64_DTPOFF64);
3521 outrel.r_offset += GOT_ENTRY_SIZE;
3522 elf_append_rela (output_bfd, sreloc,
3523 &outrel);
3524 }
3525 }
3526
3527 dr_done:
3528 if (h != NULL)
3529 h->got.offset |= 1;
3530 else
3531 local_got_offsets[r_symndx] |= 1;
3532 }
3533
3534 if (off >= (bfd_vma) -2
3535 && ! GOT_TLS_GDESC_P (tls_type))
3536 abort ();
3537 if (r_type_tls == r_type)
3538 {
3539 if (r_type == R_X86_64_GOTPC32_TLSDESC
3540 || r_type == R_X86_64_TLSDESC_CALL)
3541 relocation = htab->elf.sgotplt->output_section->vma
3542 + htab->elf.sgotplt->output_offset
3543 + offplt + htab->sgotplt_jump_table_size;
3544 else
3545 relocation = htab->elf.sgot->output_section->vma
3546 + htab->elf.sgot->output_offset + off;
3547 unresolved_reloc = FALSE;
3548 }
3549 else
3550 {
3551 bfd_vma roff = rel->r_offset;
3552
3553 if (r_type == R_X86_64_TLSGD)
3554 {
3555 /* GD->IE transition. For 64bit, change
3556 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3557 .word 0x6666; rex64; call __tls_get_addr@PLT
3558 or
3559 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3560 .byte 0x66; rex64
3561 call *__tls_get_addr@GOTPCREL(%rip)
3562 which may be converted to
3563 addr32 call __tls_get_addr
3564 into:
3565 movq %fs:0, %rax
3566 addq foo@gottpoff(%rip), %rax
3567 For 32bit, change
3568 leaq foo@tlsgd(%rip), %rdi
3569 .word 0x6666; rex64; call __tls_get_addr@PLT
3570 or
3571 leaq foo@tlsgd(%rip), %rdi
3572 .byte 0x66; rex64;
3573 call *__tls_get_addr@GOTPCREL(%rip)
3574 which may be converted to
3575 addr32 call __tls_get_addr
3576 into:
3577 movl %fs:0, %eax
3578 addq foo@gottpoff(%rip), %rax
3579 For largepic, change:
3580 leaq foo@tlsgd(%rip), %rdi
3581 movabsq $__tls_get_addr@pltoff, %rax
3582 addq %r15, %rax
3583 call *%rax
3584 into:
3585 movq %fs:0, %rax
3586 addq foo@gottpoff(%rax), %rax
3587 nopw 0x0(%rax,%rax,1) */
3588 int largepic = 0;
3589 if (ABI_64_P (output_bfd))
3590 {
3591 if (contents[roff + 5] == 0xb8)
3592 {
3593 memcpy (contents + roff - 3,
3594 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3595 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3596 largepic = 1;
3597 }
3598 else
3599 memcpy (contents + roff - 4,
3600 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3601 16);
3602 }
3603 else
3604 memcpy (contents + roff - 3,
3605 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3606 15);
3607
3608 relocation = (htab->elf.sgot->output_section->vma
3609 + htab->elf.sgot->output_offset + off
3610 - roff
3611 - largepic
3612 - input_section->output_section->vma
3613 - input_section->output_offset
3614 - 12);
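/* The rewritten @gottpoff displacement sits ROFF + 8 (+ LARGEPIC)
   bytes into the section and the addq instruction ends 4 bytes
   later, so the stored PC-relative value is the GOT slot address
   minus the address of ROFF + 12 (+ LARGEPIC).  */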
3615 bfd_put_32 (output_bfd, relocation,
3616 contents + roff + 8 + largepic);
3617 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3618 rel++;
3619 wrel++;
3620 continue;
3621 }
3622 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3623 {
3624 /* GDesc -> IE transition.
3625 It's originally something like:
3626 leaq x@tlsdesc(%rip), %rax
3627
3628 Change it to:
3629 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3630
3631 /* Now modify the instruction as appropriate. To
3632 turn a leaq into a movq in the form we use it, it
3633 suffices to change the second byte from 0x8d to
3634 0x8b. */
3635 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3636
3637 bfd_put_32 (output_bfd,
3638 htab->elf.sgot->output_section->vma
3639 + htab->elf.sgot->output_offset + off
3640 - rel->r_offset
3641 - input_section->output_section->vma
3642 - input_section->output_offset
3643 - 4,
3644 contents + roff);
3645 continue;
3646 }
3647 else if (r_type == R_X86_64_TLSDESC_CALL)
3648 {
3649 /* GDesc -> IE transition.
3650 It's originally:
3651 call *(%rax)
3652
3653 Change it to:
3654 xchg %ax, %ax. */
3655
3656 bfd_put_8 (output_bfd, 0x66, contents + roff);
3657 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3658 continue;
3659 }
3660 else
3661 BFD_ASSERT (FALSE);
3662 }
3663 break;
3664
3665 case R_X86_64_TLSLD:
3666 if (! elf_x86_64_tls_transition (info, input_bfd,
3667 input_section, contents,
3668 symtab_hdr, sym_hashes,
3669 &r_type, GOT_UNKNOWN, rel,
3670 relend, h, r_symndx, TRUE))
3671 return FALSE;
3672
3673 if (r_type != R_X86_64_TLSLD)
3674 {
3675 /* LD->LE transition:
3676 leaq foo@tlsld(%rip), %rdi
3677 call __tls_get_addr@PLT
3678 For 64bit, we change it into:
3679 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3680 For 32bit, we change it into:
3681 nopl 0x0(%rax); movl %fs:0, %eax
3682 Or
3683 leaq foo@tlsld(%rip), %rdi;
3684 call *__tls_get_addr@GOTPCREL(%rip)
3685 which may be converted to
3686 addr32 call __tls_get_addr
3687 For 64bit, we change it into:
3688 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3689 For 32bit, we change it into:
3690 nopw 0x0(%rax); movl %fs:0, %eax
3691 For largepic, change:
3692 leaq foo@tlsld(%rip), %rdi
3693 movabsq $__tls_get_addr@pltoff, %rax
3694 addq %rbx, %rax
3695 call *%rax
3696 into
3697 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3698 movq %fs:0, %rax */
3699
3700 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3701 if (ABI_64_P (output_bfd))
3702 {
3703 if (contents[rel->r_offset + 5] == 0xb8)
3704 memcpy (contents + rel->r_offset - 3,
3705 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3706 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3707 else if (contents[rel->r_offset + 4] == 0xff
3708 || contents[rel->r_offset + 4] == 0x67)
3709 memcpy (contents + rel->r_offset - 3,
3710 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3711 13);
3712 else
3713 memcpy (contents + rel->r_offset - 3,
3714 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3715 }
3716 else
3717 {
3718 if (contents[rel->r_offset + 4] == 0xff)
3719 memcpy (contents + rel->r_offset - 3,
3720 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3721 13);
3722 else
3723 memcpy (contents + rel->r_offset - 3,
3724 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3725 }
3726 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3727 and R_X86_64_PLTOFF64. */
3728 rel++;
3729 wrel++;
3730 continue;
3731 }
3732
3733 if (htab->elf.sgot == NULL)
3734 abort ();
3735
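/* The low bit of tls_ld_or_ldm_got.offset records whether the shared
   R_X86_64_DTPMOD64 GOT entry has already been emitted; mask it off
   to recover the real GOT offset.  */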
3736 off = htab->tls_ld_or_ldm_got.offset;
3737 if (off & 1)
3738 off &= ~1;
3739 else
3740 {
3741 Elf_Internal_Rela outrel;
3742
3743 if (htab->elf.srelgot == NULL)
3744 abort ();
3745
3746 outrel.r_offset = (htab->elf.sgot->output_section->vma
3747 + htab->elf.sgot->output_offset + off);
3748
3749 bfd_put_64 (output_bfd, 0,
3750 htab->elf.sgot->contents + off);
3751 bfd_put_64 (output_bfd, 0,
3752 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3753 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3754 outrel.r_addend = 0;
3755 elf_append_rela (output_bfd, htab->elf.srelgot,
3756 &outrel);
3757 htab->tls_ld_or_ldm_got.offset |= 1;
3758 }
3759 relocation = htab->elf.sgot->output_section->vma
3760 + htab->elf.sgot->output_offset + off;
3761 unresolved_reloc = FALSE;
3762 break;
3763
3764 case R_X86_64_DTPOFF32:
3765 if (!bfd_link_executable (info)
3766 || (input_section->flags & SEC_CODE) == 0)
3767 relocation -= _bfd_x86_elf_dtpoff_base (info);
3768 else
3769 relocation = elf_x86_64_tpoff (info, relocation);
3770 break;
3771
3772 case R_X86_64_TPOFF32:
3773 case R_X86_64_TPOFF64:
3774 BFD_ASSERT (bfd_link_executable (info));
3775 relocation = elf_x86_64_tpoff (info, relocation);
3776 break;
3777
3778 case R_X86_64_DTPOFF64:
3779 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3780 relocation -= _bfd_x86_elf_dtpoff_base (info);
3781 break;
3782
3783 default:
3784 break;
3785 }
3786
3787 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3788 because such sections are not SEC_ALLOC and thus ld.so will
3789 not process them. */
3790 if (unresolved_reloc
3791 && !((input_section->flags & SEC_DEBUGGING) != 0
3792 && h->def_dynamic)
3793 && _bfd_elf_section_offset (output_bfd, info, input_section,
3794 rel->r_offset) != (bfd_vma) -1)
3795 {
3796 switch (r_type)
3797 {
3798 case R_X86_64_32S:
3799 sec = h->root.u.def.section;
3800 if ((info->nocopyreloc
3801 || (eh->def_protected
3802 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3803 && !(h->root.u.def.section->flags & SEC_CODE))
3804 return elf_x86_64_need_pic (info, input_bfd, input_section,
3805 h, NULL, NULL, howto);
3806 /* Fall through. */
3807
3808 default:
3809 _bfd_error_handler
3810 /* xgettext:c-format */
3811 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3812 input_bfd,
3813 input_section,
3814 rel->r_offset,
3815 howto->name,
3816 h->root.root.string);
3817 return FALSE;
3818 }
3819 }
3820
3821 do_relocation:
3822 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3823 contents, rel->r_offset,
3824 relocation, rel->r_addend);
3825
3826 check_relocation_error:
3827 if (r != bfd_reloc_ok)
3828 {
3829 const char *name;
3830
3831 if (h != NULL)
3832 name = h->root.root.string;
3833 else
3834 {
3835 name = bfd_elf_string_from_elf_section (input_bfd,
3836 symtab_hdr->sh_link,
3837 sym->st_name);
3838 if (name == NULL)
3839 return FALSE;
3840 if (*name == '\0')
3841 name = bfd_section_name (input_bfd, sec);
3842 }
3843
3844 if (r == bfd_reloc_overflow)
3845 {
3846 if (converted_reloc)
3847 {
3848 info->callbacks->einfo
3849 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3850 return FALSE;
3851 }
3852 (*info->callbacks->reloc_overflow)
3853 (info, (h ? &h->root : NULL), name, howto->name,
3854 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3855 }
3856 else
3857 {
3858 _bfd_error_handler
3859 /* xgettext:c-format */
3860 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3861 input_bfd, input_section,
3862 rel->r_offset, name, (int) r);
3863 return FALSE;
3864 }
3865 }
3866
3867 if (wrel != rel)
3868 *wrel = *rel;
3869 }
3870
3871 if (wrel != rel)
3872 {
3873 Elf_Internal_Shdr *rel_hdr;
3874 size_t deleted = rel - wrel;
3875
3876 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3877 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3878 if (rel_hdr->sh_size == 0)
3879 {
3880 /* It is too late to remove an empty reloc section. Leave
3881 one NONE reloc.
3882 ??? What is wrong with an empty section??? */
3883 rel_hdr->sh_size = rel_hdr->sh_entsize;
3884 deleted -= 1;
3885 }
3886 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3887 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3888 input_section->reloc_count -= deleted;
3889 }
3890
3891 return TRUE;
3892 }
3893
3894 /* Finish up dynamic symbol handling. We set the contents of various
3895 dynamic sections here. */
3896
3897 static bfd_boolean
3898 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3899 struct bfd_link_info *info,
3900 struct elf_link_hash_entry *h,
3901 Elf_Internal_Sym *sym)
3902 {
3903 struct elf_x86_link_hash_table *htab;
3904 bfd_boolean use_plt_second;
3905 struct elf_x86_link_hash_entry *eh;
3906 bfd_boolean local_undefweak;
3907
3908 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3909 if (htab == NULL)
3910 return FALSE;
3911
3912 /* Use the second PLT section only if there is a .plt section. */
3913 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3914
3915 eh = (struct elf_x86_link_hash_entry *) h;
3916 if (eh->no_finish_dynamic_symbol)
3917 abort ();
3918
3919 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3920 resolved undefined weak symbols in executable so that their
3921 references have value 0 at run-time. */
3922 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3923
3924 if (h->plt.offset != (bfd_vma) -1)
3925 {
3926 bfd_vma plt_index;
3927 bfd_vma got_offset, plt_offset;
3928 Elf_Internal_Rela rela;
3929 bfd_byte *loc;
3930 asection *plt, *gotplt, *relplt, *resolved_plt;
3931 const struct elf_backend_data *bed;
3932 bfd_vma plt_got_pcrel_offset;
3933
3934 /* When building a static executable, use .iplt, .igot.plt and
3935 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3936 if (htab->elf.splt != NULL)
3937 {
3938 plt = htab->elf.splt;
3939 gotplt = htab->elf.sgotplt;
3940 relplt = htab->elf.srelplt;
3941 }
3942 else
3943 {
3944 plt = htab->elf.iplt;
3945 gotplt = htab->elf.igotplt;
3946 relplt = htab->elf.irelplt;
3947 }
3948
3949 /* This symbol has an entry in the procedure linkage table. Set
3950 it up. */
3951 if ((h->dynindx == -1
3952 && !local_undefweak
3953 && !((h->forced_local || bfd_link_executable (info))
3954 && h->def_regular
3955 && h->type == STT_GNU_IFUNC))
3956 || plt == NULL
3957 || gotplt == NULL
3958 || relplt == NULL)
3959 abort ();
3960
3961 /* Get the index in the procedure linkage table which
3962 corresponds to this symbol. This is the index of this symbol
3963 in all the symbols for which we are making plt entries. The
3964 first entry in the procedure linkage table is reserved.
3965
3966 Get the offset into the .got table of the entry that
3967 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3968 bytes. The first three are reserved for the dynamic linker.
3969
3970 For static executables, we don't reserve anything. */
3971
3972 if (plt == htab->elf.splt)
3973 {
3974 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3975 - htab->plt.has_plt0);
3976 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3977 }
3978 else
3979 {
3980 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3981 got_offset = got_offset * GOT_ENTRY_SIZE;
3982 }
3983
3984 /* Fill in the entry in the procedure linkage table. */
3985 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3986 htab->plt.plt_entry_size);
3987 if (use_plt_second)
3988 {
3989 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3990 htab->non_lazy_plt->plt_entry,
3991 htab->non_lazy_plt->plt_entry_size);
3992
3993 resolved_plt = htab->plt_second;
3994 plt_offset = eh->plt_second.offset;
3995 }
3996 else
3997 {
3998 resolved_plt = plt;
3999 plt_offset = h->plt.offset;
4000 }
4001
4002 /* Insert the relocation positions of the plt section. */
4003
4004 /* Put in the PC-relative offset to the GOT entry for the instruction
4005 referring to it, subtracting the size of that instruction. */
4006 plt_got_pcrel_offset = (gotplt->output_section->vma
4007 + gotplt->output_offset
4008 + got_offset
4009 - resolved_plt->output_section->vma
4010 - resolved_plt->output_offset
4011 - plt_offset
4012 - htab->plt.plt_got_insn_size);
4013
4014 /* Check PC-relative offset overflow in PLT entry. */
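/* The displacement must fit in a signed 32-bit field; adding
   0x80000000 maps the valid range [-0x80000000, 0x7fffffff]
   onto [0, 0xffffffff].  */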
4015 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4016 /* xgettext:c-format */
4017 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4018 output_bfd, h->root.root.string);
4019
4020 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4021 (resolved_plt->contents + plt_offset
4022 + htab->plt.plt_got_offset));
4023
4024 /* Fill in the entry in the global offset table; initially this
4025 points to the second part of the PLT entry. Leave the entry
4026 as zero for an undefined weak symbol in PIE, since no PLT
4027 relocation is generated against an undefined weak symbol in PIE. */
4028 if (!local_undefweak)
4029 {
4030 if (htab->plt.has_plt0)
4031 bfd_put_64 (output_bfd, (plt->output_section->vma
4032 + plt->output_offset
4033 + h->plt.offset
4034 + htab->lazy_plt->plt_lazy_offset),
4035 gotplt->contents + got_offset);
4036
4037 /* Fill in the entry in the .rela.plt section. */
4038 rela.r_offset = (gotplt->output_section->vma
4039 + gotplt->output_offset
4040 + got_offset);
4041 if (PLT_LOCAL_IFUNC_P (info, h))
4042 {
4043 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4044 h->root.root.string,
4045 h->root.u.def.section->owner);
4046
4047 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4048 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4049 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4050 rela.r_addend = (h->root.u.def.value
4051 + h->root.u.def.section->output_section->vma
4052 + h->root.u.def.section->output_offset);
4053 /* R_X86_64_IRELATIVE comes last. */
4054 plt_index = htab->next_irelative_index--;
4055 }
4056 else
4057 {
4058 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4059 rela.r_addend = 0;
4060 plt_index = htab->next_jump_slot_index++;
4061 }
4062
4063 /* Don't fill in the second and third slots of the PLT entry
4064 for static executables or when there is no PLT0. */
4065 if (plt == htab->elf.splt && htab->plt.has_plt0)
4066 {
4067 bfd_vma plt0_offset
4068 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4069
4070 /* Put relocation index. */
4071 bfd_put_32 (output_bfd, plt_index,
4072 (plt->contents + h->plt.offset
4073 + htab->lazy_plt->plt_reloc_offset));
4074
4075 /* Put in the offset for the jmp to .PLT0 and check for overflow.
4076 We don't check the relocation index for overflow since the branch
4077 displacement will overflow first. */
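/* The jmp back to PLT0 stores -plt0_offset as a rel32, so its
   magnitude must not exceed 0x80000000.  */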
4078 if (plt0_offset > 0x80000000)
4079 /* xgettext:c-format */
4080 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4081 output_bfd, h->root.root.string);
4082 bfd_put_32 (output_bfd, - plt0_offset,
4083 (plt->contents + h->plt.offset
4084 + htab->lazy_plt->plt_plt_offset));
4085 }
4086
4087 bed = get_elf_backend_data (output_bfd);
4088 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4089 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4090 }
4091 }
4092 else if (eh->plt_got.offset != (bfd_vma) -1)
4093 {
4094 bfd_vma got_offset, plt_offset;
4095 asection *plt, *got;
4096 bfd_boolean got_after_plt;
4097 int32_t got_pcrel_offset;
4098
4099 /* Set the entry in the GOT procedure linkage table. */
4100 plt = htab->plt_got;
4101 got = htab->elf.sgot;
4102 got_offset = h->got.offset;
4103
4104 if (got_offset == (bfd_vma) -1
4105 || (h->type == STT_GNU_IFUNC && h->def_regular)
4106 || plt == NULL
4107 || got == NULL)
4108 abort ();
4109
4110 /* Use the non-lazy PLT entry template for the GOT PLT since they
4111 are identical. */
4112 /* Fill in the entry in the GOT procedure linkage table. */
4113 plt_offset = eh->plt_got.offset;
4114 memcpy (plt->contents + plt_offset,
4115 htab->non_lazy_plt->plt_entry,
4116 htab->non_lazy_plt->plt_entry_size);
4117
4118 /* Put in the PC-relative offset to the GOT entry for the
4119 instruction referring to it, subtracting the size of that instruction. */
4120 got_pcrel_offset = (got->output_section->vma
4121 + got->output_offset
4122 + got_offset
4123 - plt->output_section->vma
4124 - plt->output_offset
4125 - plt_offset
4126 - htab->non_lazy_plt->plt_got_insn_size);
4127
4128 /* Check PC-relative offset overflow in GOT PLT entry. */
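/* got_pcrel_offset is only 32 bits wide; if its sign disagrees with
   the relative placement of the GOT and PLT output sections, the
   real offset did not fit.  */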
4129 got_after_plt = got->output_section->vma > plt->output_section->vma;
4130 if ((got_after_plt && got_pcrel_offset < 0)
4131 || (!got_after_plt && got_pcrel_offset > 0))
4132 /* xgettext:c-format */
4133 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4134 output_bfd, h->root.root.string);
4135
4136 bfd_put_32 (output_bfd, got_pcrel_offset,
4137 (plt->contents + plt_offset
4138 + htab->non_lazy_plt->plt_got_offset));
4139 }
4140
4141 if (!local_undefweak
4142 && !h->def_regular
4143 && (h->plt.offset != (bfd_vma) -1
4144 || eh->plt_got.offset != (bfd_vma) -1))
4145 {
4146 /* Mark the symbol as undefined, rather than as defined in
4147 the .plt section. Leave the value if there were any
4148 relocations where pointer equality matters (this is a clue
4149 for the dynamic linker, to make function pointer
4150 comparisons work between an application and shared
4151 library), otherwise set it to zero. If a function is only
4152 called from a binary, there is no need to slow down
4153 shared libraries because of that. */
4154 sym->st_shndx = SHN_UNDEF;
4155 if (!h->pointer_equality_needed)
4156 sym->st_value = 0;
4157 }
4158
4159 /* Don't generate a dynamic GOT relocation against an undefined
4160 weak symbol in an executable. */
4161 if (h->got.offset != (bfd_vma) -1
4162 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4163 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4164 && !local_undefweak)
4165 {
4166 Elf_Internal_Rela rela;
4167 asection *relgot = htab->elf.srelgot;
4168
4169 /* This symbol has an entry in the global offset table. Set it
4170 up. */
4171 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4172 abort ();
4173
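/* The low bit of h->got.offset records whether the GOT entry has
   already been initialized by relocate_section; mask it off to get
   the real GOT offset.  */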
4174 rela.r_offset = (htab->elf.sgot->output_section->vma
4175 + htab->elf.sgot->output_offset
4176 + (h->got.offset &~ (bfd_vma) 1));
4177
4178 /* If this is a static link, or it is a -Bsymbolic link and the
4179 symbol is defined locally or was forced to be local because
4180 of a version file, we just want to emit a RELATIVE reloc.
4181 The entry in the global offset table will already have been
4182 initialized in the relocate_section function. */
4183 if (h->def_regular
4184 && h->type == STT_GNU_IFUNC)
4185 {
4186 if (h->plt.offset == (bfd_vma) -1)
4187 {
4188 /* STT_GNU_IFUNC is referenced without PLT. */
4189 if (htab->elf.splt == NULL)
4190 {
4191 /* Use the .rel[a].iplt section to store .got relocations
4192 in a static executable. */
4193 relgot = htab->elf.irelplt;
4194 }
4195 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4196 {
4197 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4198 h->root.root.string,
4199 h->root.u.def.section->owner);
4200
4201 rela.r_info = htab->r_info (0,
4202 R_X86_64_IRELATIVE);
4203 rela.r_addend = (h->root.u.def.value
4204 + h->root.u.def.section->output_section->vma
4205 + h->root.u.def.section->output_offset);
4206 }
4207 else
4208 goto do_glob_dat;
4209 }
4210 else if (bfd_link_pic (info))
4211 {
4212 /* Generate R_X86_64_GLOB_DAT. */
4213 goto do_glob_dat;
4214 }
4215 else
4216 {
4217 asection *plt;
4218 bfd_vma plt_offset;
4219
4220 if (!h->pointer_equality_needed)
4221 abort ();
4222
4223 /* For a non-shared object, we can't use .got.plt, which
4224 contains the real function address, if we need pointer
4225 equality. We load the GOT entry with the PLT entry. */
4226 if (htab->plt_second != NULL)
4227 {
4228 plt = htab->plt_second;
4229 plt_offset = eh->plt_second.offset;
4230 }
4231 else
4232 {
4233 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4234 plt_offset = h->plt.offset;
4235 }
4236 bfd_put_64 (output_bfd, (plt->output_section->vma
4237 + plt->output_offset
4238 + plt_offset),
4239 htab->elf.sgot->contents + h->got.offset);
4240 return TRUE;
4241 }
4242 }
4243 else if (bfd_link_pic (info)
4244 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4245 {
4246 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4247 return FALSE;
4248 BFD_ASSERT((h->got.offset & 1) != 0);
4249 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4250 rela.r_addend = (h->root.u.def.value
4251 + h->root.u.def.section->output_section->vma
4252 + h->root.u.def.section->output_offset);
4253 }
4254 else
4255 {
4256 BFD_ASSERT((h->got.offset & 1) == 0);
4257 do_glob_dat:
4258 bfd_put_64 (output_bfd, (bfd_vma) 0,
4259 htab->elf.sgot->contents + h->got.offset);
4260 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4261 rela.r_addend = 0;
4262 }
4263
4264 elf_append_rela (output_bfd, relgot, &rela);
4265 }
4266
4267 if (h->needs_copy)
4268 {
4269 Elf_Internal_Rela rela;
4270 asection *s;
4271
4272 /* This symbol needs a copy reloc. Set it up. */
4273
4274 if (h->dynindx == -1
4275 || (h->root.type != bfd_link_hash_defined
4276 && h->root.type != bfd_link_hash_defweak)
4277 || htab->elf.srelbss == NULL
4278 || htab->elf.sreldynrelro == NULL)
4279 abort ();
4280
4281 rela.r_offset = (h->root.u.def.value
4282 + h->root.u.def.section->output_section->vma
4283 + h->root.u.def.section->output_offset);
4284 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4285 rela.r_addend = 0;
4286 if (h->root.u.def.section == htab->elf.sdynrelro)
4287 s = htab->elf.sreldynrelro;
4288 else
4289 s = htab->elf.srelbss;
4290 elf_append_rela (output_bfd, s, &rela);
4291 }
4292
4293 return TRUE;
4294 }
4295
4296 /* Finish up local dynamic symbol handling. We set the contents of
4297 various dynamic sections here. */
4298
4299 static bfd_boolean
4300 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4301 {
4302 struct elf_link_hash_entry *h
4303 = (struct elf_link_hash_entry *) *slot;
4304 struct bfd_link_info *info
4305 = (struct bfd_link_info *) inf;
4306
4307 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4308 info, h, NULL);
4309 }
4310
4311 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT
4312 entry here since an undefined weak symbol may not be dynamic, in which
4313 case elf_x86_64_finish_dynamic_symbol may not be called for it. */
4314
4315 static bfd_boolean
4316 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4317 void *inf)
4318 {
4319 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4320 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4321
4322 if (h->root.type != bfd_link_hash_undefweak
4323 || h->dynindx != -1)
4324 return TRUE;
4325
4326 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4327 info, h, NULL);
4328 }
4329
4330 /* Used to decide how to sort relocs in an optimal manner for the
4331 dynamic linker, before writing them out. */
4332
4333 static enum elf_reloc_type_class
4334 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4335 const asection *rel_sec ATTRIBUTE_UNUSED,
4336 const Elf_Internal_Rela *rela)
4337 {
4338 bfd *abfd = info->output_bfd;
4339 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4340 struct elf_x86_link_hash_table *htab
4341 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4342
4343 if (htab->elf.dynsym != NULL
4344 && htab->elf.dynsym->contents != NULL)
4345 {
4346 /* Check relocation against STT_GNU_IFUNC symbol if there are
4347 dynamic symbols. */
4348 unsigned long r_symndx = htab->r_sym (rela->r_info);
4349 if (r_symndx != STN_UNDEF)
4350 {
4351 Elf_Internal_Sym sym;
4352 if (!bed->s->swap_symbol_in (abfd,
4353 (htab->elf.dynsym->contents
4354 + r_symndx * bed->s->sizeof_sym),
4355 0, &sym))
4356 abort ();
4357
4358 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4359 return reloc_class_ifunc;
4360 }
4361 }
4362
4363 switch ((int) ELF32_R_TYPE (rela->r_info))
4364 {
4365 case R_X86_64_IRELATIVE:
4366 return reloc_class_ifunc;
4367 case R_X86_64_RELATIVE:
4368 case R_X86_64_RELATIVE64:
4369 return reloc_class_relative;
4370 case R_X86_64_JUMP_SLOT:
4371 return reloc_class_plt;
4372 case R_X86_64_COPY:
4373 return reloc_class_copy;
4374 default:
4375 return reloc_class_normal;
4376 }
4377 }
4378
4379 /* Finish up the dynamic sections. */
4380
4381 static bfd_boolean
4382 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4383 struct bfd_link_info *info)
4384 {
4385 struct elf_x86_link_hash_table *htab;
4386 bfd *dynobj;
4387 asection *sdyn;
4388
4389 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4390 if (htab == NULL)
4391 return FALSE;
4392
4393 dynobj = htab->elf.dynobj;
4394 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4395
4396 if (htab->elf.dynamic_sections_created)
4397 {
4398 bfd_byte *dyncon, *dynconend;
4399 const struct elf_backend_data *bed;
4400 bfd_size_type sizeof_dyn;
4401
4402 if (sdyn == NULL || htab->elf.sgot == NULL)
4403 abort ();
4404
4405 bed = get_elf_backend_data (dynobj);
4406 sizeof_dyn = bed->s->sizeof_dyn;
4407 dyncon = sdyn->contents;
4408 dynconend = sdyn->contents + sdyn->size;
4409 for (; dyncon < dynconend; dyncon += sizeof_dyn)
4410 {
4411 Elf_Internal_Dyn dyn;
4412 asection *s;
4413
4414 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
4415
4416 switch (dyn.d_tag)
4417 {
4418 default:
4419 continue;
4420
4421 case DT_PLTGOT:
4422 s = htab->elf.sgotplt;
4423 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4424 break;
4425
4426 case DT_JMPREL:
4427 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
4428 break;
4429
4430 case DT_PLTRELSZ:
4431 s = htab->elf.srelplt->output_section;
4432 dyn.d_un.d_val = s->size;
4433 break;
4434
4435 case DT_TLSDESC_PLT:
4436 s = htab->elf.splt;
4437 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4438 + htab->tlsdesc_plt;
4439 break;
4440
4441 case DT_TLSDESC_GOT:
4442 s = htab->elf.sgot;
4443 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4444 + htab->tlsdesc_got;
4445 break;
4446 }
4447
4448 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
4449 }
4450
4451 if (htab->elf.splt && htab->elf.splt->size > 0)
4452 {
4453 elf_section_data (htab->elf.splt->output_section)
4454 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4455
4456 if (htab->plt.has_plt0)
4457 {
4458 /* Fill in the special first entry in the procedure linkage
4459 table. */
4460 memcpy (htab->elf.splt->contents,
4461 htab->lazy_plt->plt0_entry,
4462 htab->lazy_plt->plt0_entry_size);
4463 /* Add the offset for pushq GOT+8(%rip); since the instruction
4464 uses 6 bytes, subtract this value. */
4465 bfd_put_32 (output_bfd,
4466 (htab->elf.sgotplt->output_section->vma
4467 + htab->elf.sgotplt->output_offset
4468 + 8
4469 - htab->elf.splt->output_section->vma
4470 - htab->elf.splt->output_offset
4471 - 6),
4472 (htab->elf.splt->contents
4473 + htab->lazy_plt->plt0_got1_offset));
4474 /* Add offset for the PC-relative instruction accessing
4475 GOT+16, subtracting the offset to the end of that
4476 instruction. */
4477 bfd_put_32 (output_bfd,
4478 (htab->elf.sgotplt->output_section->vma
4479 + htab->elf.sgotplt->output_offset
4480 + 16
4481 - htab->elf.splt->output_section->vma
4482 - htab->elf.splt->output_offset
4483 - htab->lazy_plt->plt0_got2_insn_end),
4484 (htab->elf.splt->contents
4485 + htab->lazy_plt->plt0_got2_offset));
4486
4487 if (htab->tlsdesc_plt)
4488 {
4489 bfd_put_64 (output_bfd, (bfd_vma) 0,
4490 htab->elf.sgot->contents + htab->tlsdesc_got);
4491
4492 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4493 htab->lazy_plt->plt0_entry,
4494 htab->lazy_plt->plt0_entry_size);
4495
4496 /* Add the offset for pushq GOT+8(%rip); since the
4497 instruction uses 6 bytes, subtract this value. */
4498 bfd_put_32 (output_bfd,
4499 (htab->elf.sgotplt->output_section->vma
4500 + htab->elf.sgotplt->output_offset
4501 + 8
4502 - htab->elf.splt->output_section->vma
4503 - htab->elf.splt->output_offset
4504 - htab->tlsdesc_plt
4505 - 6),
4506 (htab->elf.splt->contents
4507 + htab->tlsdesc_plt
4508 + htab->lazy_plt->plt0_got1_offset));
4509 /* Add offset for the PC-relative instruction accessing
4510 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4511 subtracting the offset to the end of that
4512 instruction. */
4513 bfd_put_32 (output_bfd,
4514 (htab->elf.sgot->output_section->vma
4515 + htab->elf.sgot->output_offset
4516 + htab->tlsdesc_got
4517 - htab->elf.splt->output_section->vma
4518 - htab->elf.splt->output_offset
4519 - htab->tlsdesc_plt
4520 - htab->lazy_plt->plt0_got2_insn_end),
4521 (htab->elf.splt->contents
4522 + htab->tlsdesc_plt
4523 + htab->lazy_plt->plt0_got2_offset));
4524 }
4525 }
4526 }
4527
4528 if (htab->plt_got != NULL && htab->plt_got->size > 0)
4529 elf_section_data (htab->plt_got->output_section)
4530 ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
4531
4532 if (htab->plt_second != NULL && htab->plt_second->size > 0)
4533 elf_section_data (htab->plt_second->output_section)
4534 ->this_hdr.sh_entsize = htab->non_lazy_plt->plt_entry_size;
4535 }
4536
4537 /* The GOT is always created by setup_gnu_properties, but it may
4538 not be needed. */
4539 if (htab->elf.sgotplt && htab->elf.sgotplt->size > 0)
4540 {
4541 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
4542 {
4543 _bfd_error_handler
4544 (_("discarded output section: `%A'"), htab->elf.sgotplt);
4545 return FALSE;
4546 }
4547
4548 /* Set the first entry in the global offset table to the address of
4549 the dynamic section. */
4550 if (sdyn == NULL)
4551 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
4552 else
4553 bfd_put_64 (output_bfd,
4554 sdyn->output_section->vma + sdyn->output_offset,
4555 htab->elf.sgotplt->contents);
4556 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
4557 bfd_put_64 (output_bfd, (bfd_vma) 0,
4558 htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
4559 bfd_put_64 (output_bfd, (bfd_vma) 0,
4560 htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
4561
4562 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize
4563 = GOT_ENTRY_SIZE;
4564 }
4565
4566 /* Adjust .eh_frame for .plt section. */
4567 if (htab->plt_eh_frame != NULL
4568 && htab->plt_eh_frame->contents != NULL)
4569 {
4570 if (htab->elf.splt != NULL
4571 && htab->elf.splt->size != 0
4572 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
4573 && htab->elf.splt->output_section != NULL
4574 && htab->plt_eh_frame->output_section != NULL)
4575 {
4576 bfd_vma plt_start = htab->elf.splt->output_section->vma;
4577 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
4578 + htab->plt_eh_frame->output_offset
4579 + PLT_FDE_START_OFFSET;
4580 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4581 htab->plt_eh_frame->contents
4582 + PLT_FDE_START_OFFSET);
4583 }
4584 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
4585 {
4586 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4587 htab->plt_eh_frame,
4588 htab->plt_eh_frame->contents))
4589 return FALSE;
4590 }
4591 }
4592
4593 /* Adjust .eh_frame for .plt.got section. */
4594 if (htab->plt_got_eh_frame != NULL
4595 && htab->plt_got_eh_frame->contents != NULL)
4596 {
4597 if (htab->plt_got != NULL
4598 && htab->plt_got->size != 0
4599 && (htab->plt_got->flags & SEC_EXCLUDE) == 0
4600 && htab->plt_got->output_section != NULL
4601 && htab->plt_got_eh_frame->output_section != NULL)
4602 {
4603 bfd_vma plt_start = htab->plt_got->output_section->vma;
4604 bfd_vma eh_frame_start = htab->plt_got_eh_frame->output_section->vma
4605 + htab->plt_got_eh_frame->output_offset
4606 + PLT_FDE_START_OFFSET;
4607 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4608 htab->plt_got_eh_frame->contents
4609 + PLT_FDE_START_OFFSET);
4610 }
4611 if (htab->plt_got_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
4612 {
4613 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4614 htab->plt_got_eh_frame,
4615 htab->plt_got_eh_frame->contents))
4616 return FALSE;
4617 }
4618 }
4619
4620 /* Adjust .eh_frame for the second PLT section. */
4621 if (htab->plt_second_eh_frame != NULL
4622 && htab->plt_second_eh_frame->contents != NULL)
4623 {
4624 if (htab->plt_second != NULL
4625 && htab->plt_second->size != 0
4626 && (htab->plt_second->flags & SEC_EXCLUDE) == 0
4627 && htab->plt_second->output_section != NULL
4628 && htab->plt_second_eh_frame->output_section != NULL)
4629 {
4630 bfd_vma plt_start = htab->plt_second->output_section->vma;
4631 bfd_vma eh_frame_start
4632 = (htab->plt_second_eh_frame->output_section->vma
4633 + htab->plt_second_eh_frame->output_offset
4634 + PLT_FDE_START_OFFSET);
4635 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4636 htab->plt_second_eh_frame->contents
4637 + PLT_FDE_START_OFFSET);
4638 }
4639 if (htab->plt_second_eh_frame->sec_info_type
4640 == SEC_INFO_TYPE_EH_FRAME)
4641 {
4642 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4643 htab->plt_second_eh_frame,
4644 htab->plt_second_eh_frame->contents))
4645 return FALSE;
4646 }
4647 }
4648
4649 if (htab->elf.sgot && htab->elf.sgot->size > 0)
4650 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
4651 = GOT_ENTRY_SIZE;
4652
4653 /* Fill PLT entries for undefined weak symbols in PIE. */
4654 if (bfd_link_pie (info))
4655 bfd_hash_traverse (&info->hash->table,
4656 elf_x86_64_pie_finish_undefweak_symbol,
4657 info);
4658
4659 return TRUE;
4660 }
4661
4662 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4663 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4664 It has to be done before elf_link_sort_relocs is called so that
4665 dynamic relocations are properly sorted. */
4666
4667 static bfd_boolean
4668 elf_x86_64_output_arch_local_syms
4669 (bfd *output_bfd ATTRIBUTE_UNUSED,
4670 struct bfd_link_info *info,
4671 void *flaginfo ATTRIBUTE_UNUSED,
4672 int (*func) (void *, const char *,
4673 Elf_Internal_Sym *,
4674 asection *,
4675 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4676 {
4677 struct elf_x86_link_hash_table *htab
4678 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4679 if (htab == NULL)
4680 return FALSE;
4681
4682 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4683 htab_traverse (htab->loc_hash_table,
4684 elf_x86_64_finish_local_dynamic_symbol,
4685 info);
4686
4687 return TRUE;
4688 }
4689
4690 /* Forward declaration. */
4691 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4692
4693 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4694 dynamic relocations. */
4695
4696 static long
4697 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4698 long symcount ATTRIBUTE_UNUSED,
4699 asymbol **syms ATTRIBUTE_UNUSED,
4700 long dynsymcount,
4701 asymbol **dynsyms,
4702 asymbol **ret)
4703 {
4704 long count, i, n;
4705 int j;
4706 bfd_byte *plt_contents;
4707 long relsize;
4708 const struct elf_x86_lazy_plt_layout *lazy_plt;
4709 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4710 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4711 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4712 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4713 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4714 asection *plt;
4715 enum elf_x86_plt_type plt_type;
4716 struct elf_x86_plt plts[] =
4717 {
4718 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4719 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4720 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4721 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4722 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4723 };
4724
4725 *ret = NULL;
4726
4727 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4728 return 0;
4729
4730 if (dynsymcount <= 0)
4731 return 0;
4732
4733 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4734 if (relsize <= 0)
4735 return -1;
4736
4737 if (get_elf_x86_64_backend_data (abfd)->os == is_normal)
4738 {
4739 lazy_plt = &elf_x86_64_lazy_plt;
4740 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4741 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4742 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4743 if (ABI_64_P (abfd))
4744 {
4745 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4746 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4747 }
4748 else
4749 {
4750 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4751 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4752 }
4753 }
4754 else
4755 {
4756 lazy_plt = &elf_x86_64_nacl_plt;
4757 non_lazy_plt = NULL;
4758 lazy_bnd_plt = NULL;
4759 non_lazy_bnd_plt = NULL;
4760 lazy_ibt_plt = NULL;
4761 non_lazy_ibt_plt = NULL;
4762 }
4763
4764 count = 0;
4765 for (j = 0; plts[j].name != NULL; j++)
4766 {
4767 plt = bfd_get_section_by_name (abfd, plts[j].name);
4768 if (plt == NULL || plt->size == 0)
4769 continue;
4770
4771 /* Get the PLT section contents. */
4772 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4773 if (plt_contents == NULL)
4774 break;
4775 if (!bfd_get_section_contents (abfd, (asection *) plt,
4776 plt_contents, 0, plt->size))
4777 {
4778 free (plt_contents);
4779 break;
4780 }
4781
4782 /* Check what kind of PLT it is. */
4783 plt_type = plt_unknown;
4784 if (plts[j].type == plt_unknown
4785 && (plt->size >= (lazy_plt->plt_entry_size
4786 + lazy_plt->plt_entry_size)))
4787 {
4788 /* Match lazy PLT first. Need to check the first two
4789 instructions. */
4790 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4791 lazy_plt->plt0_got1_offset) == 0)
4792 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4793 2) == 0))
4794 plt_type = plt_lazy;
4795 else if (lazy_bnd_plt != NULL
4796 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4797 lazy_bnd_plt->plt0_got1_offset) == 0)
4798 && (memcmp (plt_contents + 6,
4799 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4800 {
4801 plt_type = plt_lazy | plt_second;
4802 /* The first entry in the lazy IBT PLT is the same as in the
4803 lazy BND PLT. */
4804 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4805 lazy_ibt_plt->plt_entry,
4806 lazy_ibt_plt->plt_got_offset) == 0))
4807 lazy_plt = lazy_ibt_plt;
4808 else
4809 lazy_plt = lazy_bnd_plt;
4810 }
4811 }
4812
4813 if (non_lazy_plt != NULL
4814 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4815 && plt->size >= non_lazy_plt->plt_entry_size)
4816 {
4817 /* Match non-lazy PLT. */
4818 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4819 non_lazy_plt->plt_got_offset) == 0)
4820 plt_type = plt_non_lazy;
4821 }
4822
4823 if (plt_type == plt_unknown || plt_type == plt_second)
4824 {
4825 if (non_lazy_bnd_plt != NULL
4826 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4827 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4828 non_lazy_bnd_plt->plt_got_offset) == 0))
4829 {
4830 /* Match BND PLT. */
4831 plt_type = plt_second;
4832 non_lazy_plt = non_lazy_bnd_plt;
4833 }
4834 else if (non_lazy_ibt_plt != NULL
4835 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4836 && (memcmp (plt_contents,
4837 non_lazy_ibt_plt->plt_entry,
4838 non_lazy_ibt_plt->plt_got_offset) == 0))
4839 {
4840 /* Match IBT PLT. */
4841 plt_type = plt_second;
4842 non_lazy_plt = non_lazy_ibt_plt;
4843 }
4844 }
4845
4846 if (plt_type == plt_unknown)
4847 {
4848 free (plt_contents);
4849 continue;
4850 }
4851
4852 plts[j].sec = plt;
4853 plts[j].type = plt_type;
4854
4855 if ((plt_type & plt_lazy))
4856 {
4857 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4858 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4859 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4860 /* Skip PLT0 in lazy PLT. */
4861 i = 1;
4862 }
4863 else
4864 {
4865 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4866 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4867 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4868 i = 0;
4869 }
4870
4871 /* Skip lazy PLT when the second PLT is used. */
4872 if (plt_type == (plt_lazy | plt_second))
4873 plts[j].count = 0;
4874 else
4875 {
4876 n = plt->size / plts[j].plt_entry_size;
4877 plts[j].count = n;
4878 count += n - i;
4879 }
4880
4881 plts[j].contents = plt_contents;
4882 }
4883
4884 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4885 (bfd_vma) 0, plts, dynsyms,
4886 ret);
4887 }
4888
4889 /* Handle an x86-64 specific section when reading an object file. This
4890 is called when elfcode.h finds a section with an unknown type. */
4891
4892 static bfd_boolean
4893 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4894 const char *name, int shindex)
4895 {
4896 if (hdr->sh_type != SHT_X86_64_UNWIND)
4897 return FALSE;
4898
4899 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4900 return FALSE;
4901
4902 return TRUE;
4903 }
4904
4905 /* Hook called by the linker routine which adds symbols from an object
4906 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4907 of .bss. */
4908
4909 static bfd_boolean
4910 elf_x86_64_add_symbol_hook (bfd *abfd,
4911 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4912 Elf_Internal_Sym *sym,
4913 const char **namep ATTRIBUTE_UNUSED,
4914 flagword *flagsp ATTRIBUTE_UNUSED,
4915 asection **secp,
4916 bfd_vma *valp)
4917 {
4918 asection *lcomm;
4919
4920 switch (sym->st_shndx)
4921 {
4922 case SHN_X86_64_LCOMMON:
4923 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4924 if (lcomm == NULL)
4925 {
4926 lcomm = bfd_make_section_with_flags (abfd,
4927 "LARGE_COMMON",
4928 (SEC_ALLOC
4929 | SEC_IS_COMMON
4930 | SEC_LINKER_CREATED));
4931 if (lcomm == NULL)
4932 return FALSE;
4933 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4934 }
4935 *secp = lcomm;
4936 *valp = sym->st_size;
4937 return TRUE;
4938 }
4939
4940 return TRUE;
4941 }
4942
4943
4944 /* Given a BFD section, try to locate the corresponding ELF section
4945 index. */
4946
4947 static bfd_boolean
4948 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4949 asection *sec, int *index_return)
4950 {
4951 if (sec == &_bfd_elf_large_com_section)
4952 {
4953 *index_return = SHN_X86_64_LCOMMON;
4954 return TRUE;
4955 }
4956 return FALSE;
4957 }
4958
4959 /* Process a symbol. */
4960
4961 static void
4962 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4963 asymbol *asym)
4964 {
4965 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4966
4967 switch (elfsym->internal_elf_sym.st_shndx)
4968 {
4969 case SHN_X86_64_LCOMMON:
4970 asym->section = &_bfd_elf_large_com_section;
4971 asym->value = elfsym->internal_elf_sym.st_size;
4972 /* Common symbol doesn't set BSF_GLOBAL. */
4973 asym->flags &= ~BSF_GLOBAL;
4974 break;
4975 }
4976 }
4977
4978 static bfd_boolean
4979 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4980 {
4981 return (sym->st_shndx == SHN_COMMON
4982 || sym->st_shndx == SHN_X86_64_LCOMMON);
4983 }
4984
4985 static unsigned int
4986 elf_x86_64_common_section_index (asection *sec)
4987 {
4988 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4989 return SHN_COMMON;
4990 else
4991 return SHN_X86_64_LCOMMON;
4992 }
4993
4994 static asection *
4995 elf_x86_64_common_section (asection *sec)
4996 {
4997 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4998 return bfd_com_section_ptr;
4999 else
5000 return &_bfd_elf_large_com_section;
5001 }
5002
5003 static bfd_boolean
5004 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5005 const Elf_Internal_Sym *sym,
5006 asection **psec,
5007 bfd_boolean newdef,
5008 bfd_boolean olddef,
5009 bfd *oldbfd,
5010 const asection *oldsec)
5011 {
5012 /* A normal common symbol and a large common symbol result in a
5013 normal common symbol. We turn the large common symbol into a
5014 normal one. */
5015 if (!olddef
5016 && h->root.type == bfd_link_hash_common
5017 && !newdef
5018 && bfd_is_com_section (*psec)
5019 && oldsec != *psec)
5020 {
5021 if (sym->st_shndx == SHN_COMMON
5022 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5023 {
5024 h->root.u.c.p->section
5025 = bfd_make_section_old_way (oldbfd, "COMMON");
5026 h->root.u.c.p->section->flags = SEC_ALLOC;
5027 }
5028 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5029 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5030 *psec = bfd_com_section_ptr;
5031 }
5032
5033 return TRUE;
5034 }
5035
5036 static int
5037 elf_x86_64_additional_program_headers (bfd *abfd,
5038 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5039 {
5040 asection *s;
5041 int count = 0;
5042
5043 /* Check to see if we need a large readonly segment. */
5044 s = bfd_get_section_by_name (abfd, ".lrodata");
5045 if (s && (s->flags & SEC_LOAD))
5046 count++;
5047
5048 /* Check to see if we need a large data segment. Since the .lbss
5049 section is placed right after the .bss section, there should be no
5050 need for a large data segment just because of .lbss. */
5051 s = bfd_get_section_by_name (abfd, ".ldata");
5052 if (s && (s->flags & SEC_LOAD))
5053 count++;
5054
5055 return count;
5056 }
5057
5058 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5059
5060 static bfd_boolean
5061 elf_x86_64_relocs_compatible (const bfd_target *input,
5062 const bfd_target *output)
5063 {
5064 return ((xvec_get_elf_backend_data (input)->s->elfclass
5065 == xvec_get_elf_backend_data (output)->s->elfclass)
5066 && _bfd_elf_relocs_compatible (input, output));
5067 }
5068
5069 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5070 with GNU properties if found. Otherwise, return NULL. */
5071
5072 static bfd *
5073 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5074 {
5075 struct elf_x86_init_table init_table;
5076
5077 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5078 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5079 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5080 != (int) R_X86_64_GNU_VTINHERIT)
5081 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5082 != (int) R_X86_64_GNU_VTENTRY))
5083 abort ();
5084
5085 init_table.is_vxworks = FALSE;
5086 if (get_elf_x86_64_backend_data (info->output_bfd)->os == is_normal)
5087 {
5088 if (info->bndplt)
5089 {
5090 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5091 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5092 }
5093 else
5094 {
5095 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5096 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5097 }
5098
5099 if (ABI_64_P (info->output_bfd))
5100 {
5101 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5102 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5103 }
5104 else
5105 {
5106 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5107 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5108 }
5109 init_table.normal_target = TRUE;
5110 }
5111 else
5112 {
5113 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5114 init_table.non_lazy_plt = NULL;
5115 init_table.lazy_ibt_plt = NULL;
5116 init_table.non_lazy_ibt_plt = NULL;
5117 init_table.normal_target = FALSE;
5118 }
5119
5120 if (ABI_64_P (info->output_bfd))
5121 {
5122 init_table.r_info = elf64_r_info;
5123 init_table.r_sym = elf64_r_sym;
5124 }
5125 else
5126 {
5127 init_table.r_info = elf32_r_info;
5128 init_table.r_sym = elf32_r_sym;
5129 }
5130
5131 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5132 }
5133
5134 static const struct bfd_elf_special_section
5135 elf_x86_64_special_sections[]=
5136 {
5137 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5138 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5139 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5140 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5141 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5142 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5143 { NULL, 0, 0, 0, 0 }
5144 };
5145
5146 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5147 #define TARGET_LITTLE_NAME "elf64-x86-64"
5148 #define ELF_ARCH bfd_arch_i386
5149 #define ELF_TARGET_ID X86_64_ELF_DATA
5150 #define ELF_MACHINE_CODE EM_X86_64
5151 #define ELF_MAXPAGESIZE 0x200000
5152 #define ELF_MINPAGESIZE 0x1000
5153 #define ELF_COMMONPAGESIZE 0x1000
5154
5155 #define elf_backend_can_gc_sections 1
5156 #define elf_backend_can_refcount 1
5157 #define elf_backend_want_got_plt 1
5158 #define elf_backend_plt_readonly 1
5159 #define elf_backend_want_plt_sym 0
5160 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5161 #define elf_backend_rela_normal 1
5162 #define elf_backend_plt_alignment 4
5163 #define elf_backend_extern_protected_data 1
5164 #define elf_backend_caches_rawsize 1
5165 #define elf_backend_dtrel_excludes_plt 1
5166 #define elf_backend_want_dynrelro 1
5167
5168 #define elf_info_to_howto elf_x86_64_info_to_howto
5169
5170 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5171 #define bfd_elf64_bfd_reloc_name_lookup \
5172 elf_x86_64_reloc_name_lookup
5173
5174 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5175 #define elf_backend_check_relocs elf_x86_64_check_relocs
5176 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5177 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5178 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5179 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5180 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5181 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5182 #ifdef CORE_HEADER
5183 #define elf_backend_write_core_note elf_x86_64_write_core_note
5184 #endif
5185 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5186 #define elf_backend_relocate_section elf_x86_64_relocate_section
5187 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5188 #define elf_backend_object_p elf64_x86_64_elf_object_p
5189 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5190
5191 #define elf_backend_section_from_shdr \
5192 elf_x86_64_section_from_shdr
5193
5194 #define elf_backend_section_from_bfd_section \
5195 elf_x86_64_elf_section_from_bfd_section
5196 #define elf_backend_add_symbol_hook \
5197 elf_x86_64_add_symbol_hook
5198 #define elf_backend_symbol_processing \
5199 elf_x86_64_symbol_processing
5200 #define elf_backend_common_section_index \
5201 elf_x86_64_common_section_index
5202 #define elf_backend_common_section \
5203 elf_x86_64_common_section
5204 #define elf_backend_common_definition \
5205 elf_x86_64_common_definition
5206 #define elf_backend_merge_symbol \
5207 elf_x86_64_merge_symbol
5208 #define elf_backend_special_sections \
5209 elf_x86_64_special_sections
5210 #define elf_backend_additional_program_headers \
5211 elf_x86_64_additional_program_headers
5212 #define elf_backend_setup_gnu_properties \
5213 elf_x86_64_link_setup_gnu_properties
5214
5215 #include "elf64-target.h"
5216
5217 /* CloudABI support. */
5218
5219 #undef TARGET_LITTLE_SYM
5220 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5221 #undef TARGET_LITTLE_NAME
5222 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5223
5224 #undef ELF_OSABI
5225 #define ELF_OSABI ELFOSABI_CLOUDABI
5226
5227 #undef elf64_bed
5228 #define elf64_bed elf64_x86_64_cloudabi_bed
5229
5230 #include "elf64-target.h"
5231
5232 /* FreeBSD support. */
5233
5234 #undef TARGET_LITTLE_SYM
5235 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5236 #undef TARGET_LITTLE_NAME
5237 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5238
5239 #undef ELF_OSABI
5240 #define ELF_OSABI ELFOSABI_FREEBSD
5241
5242 #undef elf64_bed
5243 #define elf64_bed elf64_x86_64_fbsd_bed
5244
5245 #include "elf64-target.h"
5246
5247 /* Solaris 2 support. */
5248
5249 #undef TARGET_LITTLE_SYM
5250 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5251 #undef TARGET_LITTLE_NAME
5252 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5253
5254 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5255 objects won't be recognized. */
5256 #undef ELF_OSABI
5257
5258 #undef elf64_bed
5259 #define elf64_bed elf64_x86_64_sol2_bed
5260
5261 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5262 boundary. */
5263 #undef elf_backend_static_tls_alignment
5264 #define elf_backend_static_tls_alignment 16
5265
5266 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5267
5268 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5269 File, p.63. */
5270 #undef elf_backend_want_plt_sym
5271 #define elf_backend_want_plt_sym 1
5272
5273 #undef elf_backend_strtab_flags
5274 #define elf_backend_strtab_flags SHF_STRINGS
5275
5276 static bfd_boolean
5277 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5278 bfd *obfd ATTRIBUTE_UNUSED,
5279 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5280 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5281 {
5282 /* PR 19938: FIXME: Need to add code for setting the sh_info
5283 and sh_link fields of Solaris specific section types. */
5284 return FALSE;
5285 }
5286
5287 #undef elf_backend_copy_special_section_fields
5288 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5289
5290 #include "elf64-target.h"
5291
5292 /* Native Client support. */
5293
5294 static bfd_boolean
5295 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5296 {
5297 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5298 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5299 return TRUE;
5300 }
5301
5302 #undef TARGET_LITTLE_SYM
5303 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5304 #undef TARGET_LITTLE_NAME
5305 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5306 #undef elf64_bed
5307 #define elf64_bed elf64_x86_64_nacl_bed
5308
5309 #undef ELF_MAXPAGESIZE
5310 #undef ELF_MINPAGESIZE
5311 #undef ELF_COMMONPAGESIZE
5312 #define ELF_MAXPAGESIZE 0x10000
5313 #define ELF_MINPAGESIZE 0x10000
5314 #define ELF_COMMONPAGESIZE 0x10000
5315
5316 /* Restore defaults. */
5317 #undef ELF_OSABI
5318 #undef elf_backend_static_tls_alignment
5319 #undef elf_backend_want_plt_sym
5320 #define elf_backend_want_plt_sym 0
5321 #undef elf_backend_strtab_flags
5322 #undef elf_backend_copy_special_section_fields
5323
5324 /* NaCl uses substantially different PLT entries for the same effects. */
5325
5326 #undef elf_backend_plt_alignment
5327 #define elf_backend_plt_alignment 5
5328 #define NACL_PLT_ENTRY_SIZE 64
5329 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5330
5331 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5332 {
5333 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5334 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5335 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5336 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5337 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5338
5339 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5340 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5341
5342 /* 32 bytes of nop to pad out to the standard size. */
5343 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5344 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5345 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5346 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5347 0x66, /* excess data16 prefix */
5348 0x90 /* nop */
5349 };
5350
5351 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5352 {
5353 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5354 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5355 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5356 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5357
5358 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5359 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5360 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5361
5362 /* Lazy GOT entries point here (32-byte aligned). */
5363 0x68, /* pushq immediate */
5364 0, 0, 0, 0, /* replaced with index into relocation table. */
5365 0xe9, /* jmp relative */
5366 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5367
5368 /* 22 bytes of nop to pad out to the standard size. */
5369 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5370 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5371 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5372 };
5373
5374 /* .eh_frame covering the .plt section. */
5375
5376 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5377 {
5378 #if (PLT_CIE_LENGTH != 20 \
5379 || PLT_FDE_LENGTH != 36 \
5380 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5381 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5382 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
5383 #endif
5384 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5385 0, 0, 0, 0, /* CIE ID */
5386 1, /* CIE version */
5387 'z', 'R', 0, /* Augmentation string */
5388 1, /* Code alignment factor */
5389 0x78, /* Data alignment factor */
5390 16, /* Return address column */
5391 1, /* Augmentation size */
5392 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5393 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5394 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5395 DW_CFA_nop, DW_CFA_nop,
5396
5397 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5398 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5399 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5400 0, 0, 0, 0, /* .plt size goes here */
5401 0, /* Augmentation size */
5402 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5403 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5404 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5405 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5406 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5407 13, /* Block length */
5408 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5409 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5410 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5411 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5412 DW_CFA_nop, DW_CFA_nop
5413 };
5414
5415 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5416 {
5417 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5418 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5419 elf_x86_64_nacl_plt_entry, /* plt_entry */
5420 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5421 2, /* plt0_got1_offset */
5422 9, /* plt0_got2_offset */
5423 13, /* plt0_got2_insn_end */
5424 3, /* plt_got_offset */
5425 33, /* plt_reloc_offset */
5426 38, /* plt_plt_offset */
5427 7, /* plt_got_insn_size */
5428 42, /* plt_plt_insn_end */
5429 32, /* plt_lazy_offset */
5430 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5431 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5432 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5433 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5434 };
5435
5436 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
5437 {
5438 is_nacl /* os */
5439 };
5440
5441 #undef elf_backend_arch_data
5442 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5443
5444 #undef elf_backend_object_p
5445 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5446 #undef elf_backend_modify_segment_map
5447 #define elf_backend_modify_segment_map nacl_modify_segment_map
5448 #undef elf_backend_modify_program_headers
5449 #define elf_backend_modify_program_headers nacl_modify_program_headers
5450 #undef elf_backend_final_write_processing
5451 #define elf_backend_final_write_processing nacl_final_write_processing
5452
5453 #include "elf64-target.h"
5454
5455 /* Native Client x32 support. */
5456
5457 static bfd_boolean
5458 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5459 {
5460 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5461 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5462 return TRUE;
5463 }
5464
5465 #undef TARGET_LITTLE_SYM
5466 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5467 #undef TARGET_LITTLE_NAME
5468 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5469 #undef elf32_bed
5470 #define elf32_bed elf32_x86_64_nacl_bed
5471
5472 #define bfd_elf32_bfd_reloc_type_lookup \
5473 elf_x86_64_reloc_type_lookup
5474 #define bfd_elf32_bfd_reloc_name_lookup \
5475 elf_x86_64_reloc_name_lookup
5476 #define bfd_elf32_get_synthetic_symtab \
5477 elf_x86_64_get_synthetic_symtab
5478
5479 #undef elf_backend_object_p
5480 #define elf_backend_object_p \
5481 elf32_x86_64_nacl_elf_object_p
5482
5483 #undef elf_backend_bfd_from_remote_memory
5484 #define elf_backend_bfd_from_remote_memory \
5485 _bfd_elf32_bfd_from_remote_memory
5486
5487 #undef elf_backend_size_info
5488 #define elf_backend_size_info \
5489 _bfd_elf32_size_info
5490
5491 #include "elf32-target.h"
5492
5493 /* Restore the defaults overridden for the NaCl targets above. */
5494 #undef elf_backend_object_p
5495 #define elf_backend_object_p elf64_x86_64_elf_object_p
5496 #undef elf_backend_bfd_from_remote_memory
5497 #undef elf_backend_size_info
5498 #undef elf_backend_modify_segment_map
5499 #undef elf_backend_modify_program_headers
5500 #undef elf_backend_final_write_processing
5501
5502 /* Intel L1OM support. */
5503
5504 static bfd_boolean
5505 elf64_l1om_elf_object_p (bfd *abfd)
5506 {
5507 /* Set the right machine number for an L1OM elf64 file. */
5508 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5509 return TRUE;
5510 }
5511
5512 #undef TARGET_LITTLE_SYM
5513 #define TARGET_LITTLE_SYM l1om_elf64_vec
5514 #undef TARGET_LITTLE_NAME
5515 #define TARGET_LITTLE_NAME "elf64-l1om"
5516 #undef ELF_ARCH
5517 #define ELF_ARCH bfd_arch_l1om
5518
5519 #undef ELF_MACHINE_CODE
5520 #define ELF_MACHINE_CODE EM_L1OM
5521
5522 #undef ELF_OSABI
5523
5524 #undef elf64_bed
5525 #define elf64_bed elf64_l1om_bed
5526
5527 #undef elf_backend_object_p
5528 #define elf_backend_object_p elf64_l1om_elf_object_p
5529
5530 /* Restore the generic x86-64 defaults (page sizes, PLT alignment, backend arch data). */
5531 #undef ELF_MAXPAGESIZE
5532 #undef ELF_MINPAGESIZE
5533 #undef ELF_COMMONPAGESIZE
5534 #define ELF_MAXPAGESIZE 0x200000
5535 #define ELF_MINPAGESIZE 0x1000
5536 #define ELF_COMMONPAGESIZE 0x1000
5537 #undef elf_backend_plt_alignment
5538 #define elf_backend_plt_alignment 4
5539 #undef elf_backend_arch_data
5540 #define elf_backend_arch_data &elf_x86_64_arch_bed
5541
5542 #include "elf64-target.h"
5543
5544 /* FreeBSD L1OM support. */
5545
5546 #undef TARGET_LITTLE_SYM
5547 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5548 #undef TARGET_LITTLE_NAME
5549 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5550
5551 #undef ELF_OSABI
5552 #define ELF_OSABI ELFOSABI_FREEBSD
5553
5554 #undef elf64_bed
5555 #define elf64_bed elf64_l1om_fbsd_bed
5556
5557 #include "elf64-target.h"
5558
5559 /* Intel K1OM support. */
5560
5561 static bfd_boolean
5562 elf64_k1om_elf_object_p (bfd *abfd)
5563 {
5564 /* Set the right machine number for a K1OM elf64 file. */
5565 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5566 return TRUE;
5567 }
5568
5569 #undef TARGET_LITTLE_SYM
5570 #define TARGET_LITTLE_SYM k1om_elf64_vec
5571 #undef TARGET_LITTLE_NAME
5572 #define TARGET_LITTLE_NAME "elf64-k1om"
5573 #undef ELF_ARCH
5574 #define ELF_ARCH bfd_arch_k1om
5575
5576 #undef ELF_MACHINE_CODE
5577 #define ELF_MACHINE_CODE EM_K1OM
5578
5579 #undef ELF_OSABI
5580
5581 #undef elf64_bed
5582 #define elf64_bed elf64_k1om_bed
5583
5584 #undef elf_backend_object_p
5585 #define elf_backend_object_p elf64_k1om_elf_object_p
5586
5587 #undef elf_backend_static_tls_alignment
5588
5589 #undef elf_backend_want_plt_sym
5590 #define elf_backend_want_plt_sym 0
5591
5592 #include "elf64-target.h"
5593
5594 /* FreeBSD K1OM support. */
5595
5596 #undef TARGET_LITTLE_SYM
5597 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5598 #undef TARGET_LITTLE_NAME
5599 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5600
5601 #undef ELF_OSABI
5602 #define ELF_OSABI ELFOSABI_FREEBSD
5603
5604 #undef elf64_bed
5605 #define elf64_bed elf64_k1om_fbsd_bed
5606
5607 #include "elf64-target.h"
5608
5609 /* 32bit x86-64 support. */
5610
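/* x32 keeps EM_X86_64 as the ELF machine code but uses 32-bit ELF
   containers; the BFD architecture is bfd_arch_i386, and
   elf32_x86_64_elf_object_p (defined earlier in this file) selects
   the x32 machine variant.  */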
5611 #undef TARGET_LITTLE_SYM
5612 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5613 #undef TARGET_LITTLE_NAME
5614 #define TARGET_LITTLE_NAME "elf32-x86-64"
5615 #undef elf32_bed
5616
5617 #undef ELF_ARCH
5618 #define ELF_ARCH bfd_arch_i386
5619
5620 #undef ELF_MACHINE_CODE
5621 #define ELF_MACHINE_CODE EM_X86_64
5622
5623 #undef ELF_OSABI
5624
5625 #undef elf_backend_object_p
5626 #define elf_backend_object_p \
5627 elf32_x86_64_elf_object_p
5628
5629 #undef elf_backend_bfd_from_remote_memory
5630 #define elf_backend_bfd_from_remote_memory \
5631 _bfd_elf32_bfd_from_remote_memory
5632
5633 #undef elf_backend_size_info
5634 #define elf_backend_size_info \
5635 _bfd_elf32_size_info
5636
5637 #include "elf32-target.h"