1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
 39    an identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to
 40    get the relocation type.  We also use ELF_ST_TYPE instead of
 41    ELF64_ST_TYPE since they are the same.  */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
 86 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
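
/* For illustration: R_X86_64_PC32 has the ABI value 2, so a GOTPCREL
   relocation rewritten to PC32 by elf_x86_64_convert_load_reloc is
   carried internally as 0x82 (2 | R_X86_64_converted_reloc_bit); the
   bit is masked off again before the type is looked up in the howto
   table.  */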
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
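
/* A worked example of the index computation above, using the values
   from include/elf/x86-64.h: R_X86_64_REX_GOTPCRELX is 42, so
   R_X86_64_standard is 43; R_X86_64_GNU_VTINHERIT is 250, so
   R_X86_64_vt_offset is 250 - 43 = 207 and the two vtable relocs
   (250 and 251) map to table indices 43 and 44, right after the 43
   standard entries at indices 0 through 42.  */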
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
348 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
349 r_type &= ~R_X86_64_converted_reloc_bit;
350 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
351 if (cache_ptr->howto == NULL)
352 return FALSE;
353 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
354 return TRUE;
355 }
356 \f
357 /* Support for core dump NOTE sections. */
358 static bfd_boolean
359 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
360 {
361 int offset;
362 size_t size;
363
364 switch (note->descsz)
365 {
366 default:
367 return FALSE;
368
 369 case 296: /* sizeof (struct elf_prstatus) on Linux/x32 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
372
373 /* pr_pid */
374 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
375
376 /* pr_reg */
377 offset = 72;
378 size = 216;
379
380 break;
381
 382 case 336: /* sizeof (struct elf_prstatus) on Linux/x86_64 */
383 /* pr_cursig */
384 elf_tdata (abfd)->core->signal
385 = bfd_get_16 (abfd, note->descdata + 12);
386
387 /* pr_pid */
388 elf_tdata (abfd)->core->lwpid
389 = bfd_get_32 (abfd, note->descdata + 32);
390
391 /* pr_reg */
392 offset = 112;
393 size = 216;
394
395 break;
396 }
397
398 /* Make a ".reg/999" section. */
399 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
400 size, note->descpos + offset);
401 }
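
/* The pr_reg size of 216 used above matches the 27 eight-byte members
   of elf_gregset_t (struct user_regs_struct) on Linux/x86-64, r15
   through gs; x32 cores reuse the same 64-bit register block, only the
   surrounding prstatus layout differs.  */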
402
403 static bfd_boolean
404 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
405 {
406 switch (note->descsz)
407 {
408 default:
409 return FALSE;
410
411 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
412 elf_tdata (abfd)->core->pid
413 = bfd_get_32 (abfd, note->descdata + 12);
414 elf_tdata (abfd)->core->program
415 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
416 elf_tdata (abfd)->core->command
417 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
418 break;
419
420 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
421 elf_tdata (abfd)->core->pid
422 = bfd_get_32 (abfd, note->descdata + 24);
423 elf_tdata (abfd)->core->program
424 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
425 elf_tdata (abfd)->core->command
426 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
427 }
428
429 /* Note that for some reason, a spurious space is tacked
 430 onto the end of the args in some implementations (at least
 431 one, anyway), so strip it off if it exists. */
432
433 {
434 char *command = elf_tdata (abfd)->core->command;
435 int n = strlen (command);
436
437 if (0 < n && command[n - 1] == ' ')
438 command[n - 1] = '\0';
439 }
440
441 return TRUE;
442 }
443
444 #ifdef CORE_HEADER
445 static char *
446 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
447 int note_type, ...)
448 {
449 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
450 va_list ap;
451 const char *fname, *psargs;
452 long pid;
453 int cursig;
454 const void *gregs;
455
456 switch (note_type)
457 {
458 default:
459 return NULL;
460
461 case NT_PRPSINFO:
462 va_start (ap, note_type);
463 fname = va_arg (ap, const char *);
464 psargs = va_arg (ap, const char *);
465 va_end (ap);
466
467 if (bed->s->elfclass == ELFCLASS32)
468 {
469 prpsinfo32_t data;
470 memset (&data, 0, sizeof (data));
471 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
472 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
473 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
474 &data, sizeof (data));
475 }
476 else
477 {
478 prpsinfo64_t data;
479 memset (&data, 0, sizeof (data));
480 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
481 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
482 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
483 &data, sizeof (data));
484 }
485 /* NOTREACHED */
486
487 case NT_PRSTATUS:
488 va_start (ap, note_type);
489 pid = va_arg (ap, long);
490 cursig = va_arg (ap, int);
491 gregs = va_arg (ap, const void *);
492 va_end (ap);
493
494 if (bed->s->elfclass == ELFCLASS32)
495 {
496 if (bed->elf_machine_code == EM_X86_64)
497 {
498 prstatusx32_t prstat;
499 memset (&prstat, 0, sizeof (prstat));
500 prstat.pr_pid = pid;
501 prstat.pr_cursig = cursig;
502 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
503 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
504 &prstat, sizeof (prstat));
505 }
506 else
507 {
508 prstatus32_t prstat;
509 memset (&prstat, 0, sizeof (prstat));
510 prstat.pr_pid = pid;
511 prstat.pr_cursig = cursig;
512 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
513 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
514 &prstat, sizeof (prstat));
515 }
516 }
517 else
518 {
519 prstatus64_t prstat;
520 memset (&prstat, 0, sizeof (prstat));
521 prstat.pr_pid = pid;
522 prstat.pr_cursig = cursig;
523 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
524 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
525 &prstat, sizeof (prstat));
526 }
527 }
528 /* NOTREACHED */
529 }
530 #endif
531 \f
532 /* Functions for the x86-64 ELF linker. */
533
534 /* The size in bytes of an entry in the global offset table. */
535
536 #define GOT_ENTRY_SIZE 8
537
538 /* The size in bytes of an entry in the lazy procedure linkage table. */
539
540 #define LAZY_PLT_ENTRY_SIZE 16
541
542 /* The size in bytes of an entry in the non-lazy procedure linkage
543 table. */
544
545 #define NON_LAZY_PLT_ENTRY_SIZE 8
546
547 /* The first entry in a lazy procedure linkage table looks like this.
548 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
549 works. */
550
551 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
552 {
553 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
554 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
555 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
556 };
557
558 /* Subsequent entries in a lazy procedure linkage table look like this. */
559
560 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
561 {
562 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
563 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
564 0x68, /* pushq immediate */
565 0, 0, 0, 0, /* replaced with index into relocation table. */
566 0xe9, /* jmp relative */
567 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
568 };
569
570 /* The first entry in a lazy procedure linkage table with BND prefix
 571 looks like this. */
572
573 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
574 {
575 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
576 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
577 0x0f, 0x1f, 0 /* nopl (%rax) */
578 };
579
 580 /* Subsequent entries for branches with BND prefix in a lazy procedure
581 linkage table look like this. */
582
583 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
584 {
585 0x68, 0, 0, 0, 0, /* pushq immediate */
586 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
587 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
588 };
589
 590 /* The first entry in the IBT-enabled lazy procedure linkage table is
 591 the same as the lazy PLT with BND prefix so that bound registers are
 592 preserved when control is passed to the dynamic linker. Subsequent
 593 entries for an IBT-enabled lazy procedure linkage table look like
594 this. */
595
596 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
597 {
598 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
599 0x68, 0, 0, 0, 0, /* pushq immediate */
600 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 0x90 /* nop */
602 };
603
604 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
605 is the same as the normal lazy PLT. Subsequent entries for an
606 x32 IBT-enabled lazy procedure linkage table look like this. */
607
608 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
609 {
610 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
611 0x68, 0, 0, 0, 0, /* pushq immediate */
612 0xe9, 0, 0, 0, 0, /* jmpq relative */
613 0x66, 0x90 /* xchg %ax,%ax */
614 };
615
 616 /* Entries in the non-lazy procedure linkage table look like this. */
617
618 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
619 {
620 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
621 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
622 0x66, 0x90 /* xchg %ax,%ax */
623 };
624
 625 /* Entries for branches with BND prefix in the non-lazy procedure
626 linkage table look like this. */
627
628 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
629 {
630 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
631 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 0x90 /* nop */
633 };
634
 635 /* Entries for branches with IBT enabled in the non-lazy procedure
636 linkage table look like this. They have the same size as the lazy
637 PLT entry. */
638
639 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
640 {
641 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
642 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
645 };
646
 647 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
648 linkage table look like this. They have the same size as the lazy
649 PLT entry. */
650
651 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
652 {
653 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
654 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
655 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
656 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
657 };
658
659 /* .eh_frame covering the lazy .plt section. */
660
661 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
662 {
663 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
664 0, 0, 0, 0, /* CIE ID */
665 1, /* CIE version */
666 'z', 'R', 0, /* Augmentation string */
667 1, /* Code alignment factor */
668 0x78, /* Data alignment factor */
669 16, /* Return address column */
670 1, /* Augmentation size */
671 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
672 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
673 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
674 DW_CFA_nop, DW_CFA_nop,
675
676 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
677 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
678 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
679 0, 0, 0, 0, /* .plt size goes here */
680 0, /* Augmentation size */
681 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
682 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
683 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
684 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
685 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
686 11, /* Block length */
687 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
688 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
689 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
690 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
691 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
692 };
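
/* A worked reading of the CFA expression above: it evaluates to
   (%rsp + 8) + ((((%rip & 15) >= 11) ? 1 : 0) << 3).  The pushq of the
   relocation index occupies offsets 6-10 of the 16-byte lazy PLT
   entry, so once %rip modulo 16 reaches 11 (the jmp back to PLT0) an
   extra 8 bytes are on the stack and the CFA moves from %rsp + 8 to
   %rsp + 16.  The BND and IBT variants below use the same scheme with
   their own thresholds.  */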
693
694 /* .eh_frame covering the lazy BND .plt section. */
695
696 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
697 {
698 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
699 0, 0, 0, 0, /* CIE ID */
700 1, /* CIE version */
701 'z', 'R', 0, /* Augmentation string */
702 1, /* Code alignment factor */
703 0x78, /* Data alignment factor */
704 16, /* Return address column */
705 1, /* Augmentation size */
706 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
707 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
708 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
709 DW_CFA_nop, DW_CFA_nop,
710
711 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
712 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
713 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
714 0, 0, 0, 0, /* .plt size goes here */
715 0, /* Augmentation size */
716 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
717 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
718 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
719 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
720 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
721 11, /* Block length */
722 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
723 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
724 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
725 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
726 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
727 };
728
729 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
730
731 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
732 {
733 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
734 0, 0, 0, 0, /* CIE ID */
735 1, /* CIE version */
736 'z', 'R', 0, /* Augmentation string */
737 1, /* Code alignment factor */
738 0x78, /* Data alignment factor */
739 16, /* Return address column */
740 1, /* Augmentation size */
741 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
742 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
743 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
744 DW_CFA_nop, DW_CFA_nop,
745
746 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
747 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
748 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
749 0, 0, 0, 0, /* .plt size goes here */
750 0, /* Augmentation size */
751 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
752 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
753 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
754 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
755 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
756 11, /* Block length */
757 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
758 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
759 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
760 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
761 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
762 };
763
764 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
765
766 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
767 {
768 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
769 0, 0, 0, 0, /* CIE ID */
770 1, /* CIE version */
771 'z', 'R', 0, /* Augmentation string */
772 1, /* Code alignment factor */
773 0x78, /* Data alignment factor */
774 16, /* Return address column */
775 1, /* Augmentation size */
776 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
777 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
778 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
779 DW_CFA_nop, DW_CFA_nop,
780
781 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
782 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
783 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
784 0, 0, 0, 0, /* .plt size goes here */
785 0, /* Augmentation size */
786 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
787 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
788 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
789 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
790 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
791 11, /* Block length */
792 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
793 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
794 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
795 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
796 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
797 };
798
799 /* .eh_frame covering the non-lazy .plt section. */
800
801 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
802 {
803 #define PLT_GOT_FDE_LENGTH 20
804 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
805 0, 0, 0, 0, /* CIE ID */
806 1, /* CIE version */
807 'z', 'R', 0, /* Augmentation string */
808 1, /* Code alignment factor */
809 0x78, /* Data alignment factor */
810 16, /* Return address column */
811 1, /* Augmentation size */
812 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
813 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
814 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
815 DW_CFA_nop, DW_CFA_nop,
816
817 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
818 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
819 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
820 0, 0, 0, 0, /* non-lazy .plt size goes here */
821 0, /* Augmentation size */
822 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
823 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
824 };
825
826 /* These are the standard parameters. */
827 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
828 {
829 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
830 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
831 elf_x86_64_lazy_plt_entry, /* plt_entry */
832 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
833 2, /* plt0_got1_offset */
834 8, /* plt0_got2_offset */
835 12, /* plt0_got2_insn_end */
836 2, /* plt_got_offset */
837 7, /* plt_reloc_offset */
838 12, /* plt_plt_offset */
839 6, /* plt_got_insn_size */
840 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
841 6, /* plt_lazy_offset */
842 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
843 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
844 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
845 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
846 };
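
/* A concrete reading of the offsets above against
   elf_x86_64_lazy_plt_entry: the 0xff 0x25 opcode occupies bytes 0-1,
   so plt_got_offset 2 is where the 4-byte GOT displacement is patched;
   the 0x68 push opcode is byte 6, so plt_reloc_offset 7 is where the
   relocation index goes; the 0xe9 jmp opcode is byte 11, so
   plt_plt_offset 12 is where the branch back to PLT0 is patched.
   plt_got_insn_size 6 marks the end of the indirect jmp, the point the
   PC-relative GOT displacement is measured from.  */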
847
848 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
849 {
850 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
851 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
852 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
853 2, /* plt_got_offset */
854 6, /* plt_got_insn_size */
855 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
856 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
857 };
858
859 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
860 {
861 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
862 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
863 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
864 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
865 2, /* plt0_got1_offset */
866 1+8, /* plt0_got2_offset */
867 1+12, /* plt0_got2_insn_end */
868 1+2, /* plt_got_offset */
869 1, /* plt_reloc_offset */
870 7, /* plt_plt_offset */
871 1+6, /* plt_got_insn_size */
872 11, /* plt_plt_insn_end */
873 0, /* plt_lazy_offset */
874 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
875 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
876 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
877 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
878 };
879
880 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
881 {
882 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
883 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
884 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
885 1+2, /* plt_got_offset */
886 1+6, /* plt_got_insn_size */
887 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
888 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
889 };
890
891 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
892 {
893 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
894 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
895 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
896 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
897 2, /* plt0_got1_offset */
898 1+8, /* plt0_got2_offset */
899 1+12, /* plt0_got2_insn_end */
900 4+1+2, /* plt_got_offset */
901 4+1, /* plt_reloc_offset */
902 4+1+6, /* plt_plt_offset */
903 4+1+6, /* plt_got_insn_size */
904 4+1+5+5, /* plt_plt_insn_end */
905 0, /* plt_lazy_offset */
906 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
907 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
908 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
909 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
910 };
911
912 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
913 {
914 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
915 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
916 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
917 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
918 2, /* plt0_got1_offset */
919 8, /* plt0_got2_offset */
920 12, /* plt0_got2_insn_end */
921 4+2, /* plt_got_offset */
922 4+1, /* plt_reloc_offset */
923 4+6, /* plt_plt_offset */
924 4+6, /* plt_got_insn_size */
925 4+5+5, /* plt_plt_insn_end */
926 0, /* plt_lazy_offset */
927 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
928 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
929 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
930 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
931 };
932
933 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
934 {
935 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
936 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
937 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
938 4+1+2, /* plt_got_offset */
939 4+1+6, /* plt_got_insn_size */
940 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
941 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
942 };
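
/* The composite offsets above just add up instruction sizes within the
   entry: 4 bytes of endbr64, the 1-byte bnd (0xf2) prefix and the
   2-byte 0xff 0x25 opcode put the GOT displacement at offset
   4+1+2 == 7, and the indirect jmp ends at offset 4+1+6 == 11, the
   point the PC-relative displacement is measured from.  */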
943
944 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
945 {
946 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
947 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
948 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
949 4+2, /* plt_got_offset */
950 4+6, /* plt_got_insn_size */
951 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
952 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
953 };
954
955 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
956 {
957 is_normal /* os */
958 };
959
960 #define elf_backend_arch_data &elf_x86_64_arch_bed
961
962 static bfd_boolean
963 elf64_x86_64_elf_object_p (bfd *abfd)
964 {
965 /* Set the right machine number for an x86-64 elf64 file. */
966 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
967 return TRUE;
968 }
969
970 static bfd_boolean
971 elf32_x86_64_elf_object_p (bfd *abfd)
972 {
973 /* Set the right machine number for an x86-64 elf32 file. */
974 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
975 return TRUE;
976 }
977
 978 /* Return TRUE if the TLS access code sequence supports the transition
 979 from R_TYPE. */
980
981 static bfd_boolean
982 elf_x86_64_check_tls_transition (bfd *abfd,
983 struct bfd_link_info *info,
984 asection *sec,
985 bfd_byte *contents,
986 Elf_Internal_Shdr *symtab_hdr,
987 struct elf_link_hash_entry **sym_hashes,
988 unsigned int r_type,
989 const Elf_Internal_Rela *rel,
990 const Elf_Internal_Rela *relend)
991 {
992 unsigned int val;
993 unsigned long r_symndx;
994 bfd_boolean largepic = FALSE;
995 struct elf_link_hash_entry *h;
996 bfd_vma offset;
997 struct elf_x86_link_hash_table *htab;
998 bfd_byte *call;
999 bfd_boolean indirect_call;
1000
1001 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1002 offset = rel->r_offset;
1003 switch (r_type)
1004 {
1005 case R_X86_64_TLSGD:
1006 case R_X86_64_TLSLD:
1007 if ((rel + 1) >= relend)
1008 return FALSE;
1009
1010 if (r_type == R_X86_64_TLSGD)
1011 {
1012 /* Check transition from GD access model. For 64bit, only
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .word 0x6666; rex64; call __tls_get_addr@PLT
1015 or
1016 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1017 .byte 0x66; rex64
1018 call *__tls_get_addr@GOTPCREL(%rip)
1019 which may be converted to
1020 addr32 call __tls_get_addr
 1021 can transition to a different access model. For 32bit, only
1022 leaq foo@tlsgd(%rip), %rdi
1023 .word 0x6666; rex64; call __tls_get_addr@PLT
1024 or
1025 leaq foo@tlsgd(%rip), %rdi
1026 .byte 0x66; rex64
1027 call *__tls_get_addr@GOTPCREL(%rip)
1028 which may be converted to
1029 addr32 call __tls_get_addr
 1030 can transition to a different access model. For largepic,
1031 we also support:
1032 leaq foo@tlsgd(%rip), %rdi
1033 movabsq $__tls_get_addr@pltoff, %rax
 1034 addq %r15, %rax
1035 call *%rax
1036 or
1037 leaq foo@tlsgd(%rip), %rdi
1038 movabsq $__tls_get_addr@pltoff, %rax
 1039 addq %rbx, %rax
1040 call *%rax */
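
/* A byte-level sketch of one form of the 64-bit GD sequence checked
   below, with the displacements shown as zeros:
     66 48 8d 3d 00 00 00 00    data16 leaq foo@tlsgd(%rip), %rdi
     66 66 48 e8 00 00 00 00    data16 data16 rex64 call __tls_get_addr
   The R_X86_64_TLSGD relocation sits on the lea displacement, so
   OFFSET - 4 is the start of the sequence, OFFSET + 4 is the first
   byte of the call and OFFSET + 12 is its end, which is why the size
   check below requires offset + 12 to fit in the section.  */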
1041
1042 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1043
1044 if ((offset + 12) > sec->size)
1045 return FALSE;
1046
1047 call = contents + offset + 4;
1048 if (call[0] != 0x66
1049 || !((call[1] == 0x48
1050 && call[2] == 0xff
1051 && call[3] == 0x15)
1052 || (call[1] == 0x48
1053 && call[2] == 0x67
1054 && call[3] == 0xe8)
1055 || (call[1] == 0x66
1056 && call[2] == 0x48
1057 && call[3] == 0xe8)))
1058 {
1059 if (!ABI_64_P (abfd)
1060 || (offset + 19) > sec->size
1061 || offset < 3
1062 || memcmp (call - 7, leaq + 1, 3) != 0
1063 || memcmp (call, "\x48\xb8", 2) != 0
1064 || call[11] != 0x01
1065 || call[13] != 0xff
1066 || call[14] != 0xd0
1067 || !((call[10] == 0x48 && call[12] == 0xd8)
1068 || (call[10] == 0x4c && call[12] == 0xf8)))
1069 return FALSE;
1070 largepic = TRUE;
1071 }
1072 else if (ABI_64_P (abfd))
1073 {
1074 if (offset < 4
1075 || memcmp (contents + offset - 4, leaq, 4) != 0)
1076 return FALSE;
1077 }
1078 else
1079 {
1080 if (offset < 3
1081 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1082 return FALSE;
1083 }
1084 indirect_call = call[2] == 0xff;
1085 }
1086 else
1087 {
1088 /* Check transition from LD access model. Only
1089 leaq foo@tlsld(%rip), %rdi;
1090 call __tls_get_addr@PLT
1091 or
1092 leaq foo@tlsld(%rip), %rdi;
1093 call *__tls_get_addr@GOTPCREL(%rip)
1094 which may be converted to
1095 addr32 call __tls_get_addr
 1096 can transition to a different access model. For largepic
1097 we also support:
1098 leaq foo@tlsld(%rip), %rdi
1099 movabsq $__tls_get_addr@pltoff, %rax
 1100 addq %r15, %rax
1101 call *%rax
1102 or
1103 leaq foo@tlsld(%rip), %rdi
1104 movabsq $__tls_get_addr@pltoff, %rax
 1105 addq %rbx, %rax
1106 call *%rax */
1107
1108 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1109
1110 if (offset < 3 || (offset + 9) > sec->size)
1111 return FALSE;
1112
1113 if (memcmp (contents + offset - 3, lea, 3) != 0)
1114 return FALSE;
1115
1116 call = contents + offset + 4;
1117 if (!(call[0] == 0xe8
1118 || (call[0] == 0xff && call[1] == 0x15)
1119 || (call[0] == 0x67 && call[1] == 0xe8)))
1120 {
1121 if (!ABI_64_P (abfd)
1122 || (offset + 19) > sec->size
1123 || memcmp (call, "\x48\xb8", 2) != 0
1124 || call[11] != 0x01
1125 || call[13] != 0xff
1126 || call[14] != 0xd0
1127 || !((call[10] == 0x48 && call[12] == 0xd8)
1128 || (call[10] == 0x4c && call[12] == 0xf8)))
1129 return FALSE;
1130 largepic = TRUE;
1131 }
1132 indirect_call = call[0] == 0xff;
1133 }
1134
1135 r_symndx = htab->r_sym (rel[1].r_info);
1136 if (r_symndx < symtab_hdr->sh_info)
1137 return FALSE;
1138
1139 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1140 if (h == NULL
1141 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1142 return FALSE;
1143 else
1144 {
1145 r_type = (ELF32_R_TYPE (rel[1].r_info)
1146 & ~R_X86_64_converted_reloc_bit);
1147 if (largepic)
1148 return r_type == R_X86_64_PLTOFF64;
1149 else if (indirect_call)
1150 return r_type == R_X86_64_GOTPCRELX;
1151 else
1152 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1153 }
1154
1155 case R_X86_64_GOTTPOFF:
1156 /* Check transition from IE access model:
1157 mov foo@gottpoff(%rip), %reg
1158 add foo@gottpoff(%rip), %reg
1159 */
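
/* For example, "movq foo@gottpoff(%rip), %rbx" (the register is
   arbitrary) encodes as 48 8b 1d <disp32>: the byte at OFFSET - 2 is
   the 0x8b (mov) or 0x03 (add) opcode checked below, and the ModRM
   byte at OFFSET - 1 must select RIP-relative addressing, i.e.
   (modrm & 0xc7) == 5.  */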
1160
1161 /* Check REX prefix first. */
1162 if (offset >= 3 && (offset + 4) <= sec->size)
1163 {
1164 val = bfd_get_8 (abfd, contents + offset - 3);
1165 if (val != 0x48 && val != 0x4c)
1166 {
1167 /* X32 may have 0x44 REX prefix or no REX prefix. */
1168 if (ABI_64_P (abfd))
1169 return FALSE;
1170 }
1171 }
1172 else
1173 {
1174 /* X32 may not have any REX prefix. */
1175 if (ABI_64_P (abfd))
1176 return FALSE;
1177 if (offset < 2 || (offset + 3) > sec->size)
1178 return FALSE;
1179 }
1180
1181 val = bfd_get_8 (abfd, contents + offset - 2);
1182 if (val != 0x8b && val != 0x03)
1183 return FALSE;
1184
1185 val = bfd_get_8 (abfd, contents + offset - 1);
1186 return (val & 0xc7) == 5;
1187
1188 case R_X86_64_GOTPC32_TLSDESC:
1189 /* Check transition from GDesc access model:
1190 leaq x@tlsdesc(%rip), %rax
1191
1192 Make sure it's a leaq adding rip to a 32-bit offset
1193 into any register, although it's probably almost always
1194 going to be rax. */
1195
1196 if (offset < 3 || (offset + 4) > sec->size)
1197 return FALSE;
1198
1199 val = bfd_get_8 (abfd, contents + offset - 3);
1200 if ((val & 0xfb) != 0x48)
1201 return FALSE;
1202
1203 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1204 return FALSE;
1205
1206 val = bfd_get_8 (abfd, contents + offset - 1);
1207 return (val & 0xc7) == 0x05;
1208
1209 case R_X86_64_TLSDESC_CALL:
1210 /* Check transition from GDesc access model:
1211 call *x@tlsdesc(%rax)
1212 */
1213 if (offset + 2 <= sec->size)
1214 {
1215 /* Make sure that it's a call *x@tlsdesc(%rax). */
1216 call = contents + offset;
1217 return call[0] == 0xff && call[1] == 0x10;
1218 }
1219
1220 return FALSE;
1221
1222 default:
1223 abort ();
1224 }
1225 }
1226
1227 /* Return TRUE if the TLS access transition is OK or no transition
1228 will be performed. Update R_TYPE if there is a transition. */
1229
1230 static bfd_boolean
1231 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1232 asection *sec, bfd_byte *contents,
1233 Elf_Internal_Shdr *symtab_hdr,
1234 struct elf_link_hash_entry **sym_hashes,
1235 unsigned int *r_type, int tls_type,
1236 const Elf_Internal_Rela *rel,
1237 const Elf_Internal_Rela *relend,
1238 struct elf_link_hash_entry *h,
1239 unsigned long r_symndx,
1240 bfd_boolean from_relocate_section)
1241 {
1242 unsigned int from_type = *r_type;
1243 unsigned int to_type = from_type;
1244 bfd_boolean check = TRUE;
1245
1246 /* Skip TLS transition for functions. */
1247 if (h != NULL
1248 && (h->type == STT_FUNC
1249 || h->type == STT_GNU_IFUNC))
1250 return TRUE;
1251
1252 switch (from_type)
1253 {
1254 case R_X86_64_TLSGD:
1255 case R_X86_64_GOTPC32_TLSDESC:
1256 case R_X86_64_TLSDESC_CALL:
1257 case R_X86_64_GOTTPOFF:
1258 if (bfd_link_executable (info))
1259 {
1260 if (h == NULL)
1261 to_type = R_X86_64_TPOFF32;
1262 else
1263 to_type = R_X86_64_GOTTPOFF;
1264 }
1265
1266 /* When we are called from elf_x86_64_relocate_section, there may
1267 be additional transitions based on TLS_TYPE. */
1268 if (from_relocate_section)
1269 {
1270 unsigned int new_to_type = to_type;
1271
1272 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1273 new_to_type = R_X86_64_TPOFF32;
1274
1275 if (to_type == R_X86_64_TLSGD
1276 || to_type == R_X86_64_GOTPC32_TLSDESC
1277 || to_type == R_X86_64_TLSDESC_CALL)
1278 {
1279 if (tls_type == GOT_TLS_IE)
1280 new_to_type = R_X86_64_GOTTPOFF;
1281 }
1282
1283 /* We checked the transition before when we were called from
1284 elf_x86_64_check_relocs. We only want to check the new
1285 transition which hasn't been checked before. */
1286 check = new_to_type != to_type && from_type == to_type;
1287 to_type = new_to_type;
1288 }
1289
1290 break;
1291
1292 case R_X86_64_TLSLD:
1293 if (bfd_link_executable (info))
1294 to_type = R_X86_64_TPOFF32;
1295 break;
1296
1297 default:
1298 return TRUE;
1299 }
1300
1301 /* Return TRUE if there is no transition. */
1302 if (from_type == to_type)
1303 return TRUE;
1304
1305 /* Check if the transition can be performed. */
1306 if (check
1307 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1308 symtab_hdr, sym_hashes,
1309 from_type, rel, relend))
1310 {
1311 reloc_howto_type *from, *to;
1312 const char *name;
1313
1314 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1315 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1316
1317 if (from == NULL || to == NULL)
1318 return FALSE;
1319
1320 if (h)
1321 name = h->root.root.string;
1322 else
1323 {
1324 struct elf_x86_link_hash_table *htab;
1325
1326 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1327 if (htab == NULL)
1328 name = "*unknown*";
1329 else
1330 {
1331 Elf_Internal_Sym *isym;
1332
1333 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1334 abfd, r_symndx);
1335 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1336 }
1337 }
1338
1339 _bfd_error_handler
1340 /* xgettext:c-format */
1341 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1342 " in section `%pA' failed"),
1343 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1344 bfd_set_error (bfd_error_bad_value);
1345 return FALSE;
1346 }
1347
1348 *r_type = to_type;
1349 return TRUE;
1350 }
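
/* For instance, when linking an executable an R_X86_64_TLSGD reloc is
   rewritten to R_X86_64_TPOFF32 (GD -> LE) if the symbol is local
   (H == NULL) and to R_X86_64_GOTTPOFF (GD -> IE) otherwise; during
   relocate_section TLS_TRANSITION_IE_TO_LE_P may then tighten the IE
   access to LE as well.  */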
1351
1352 /* Rename some of the generic section flags to better document how they
1353 are used here. */
1354 #define check_relocs_failed sec_flg0
1355
1356 static bfd_boolean
1357 elf_x86_64_need_pic (struct bfd_link_info *info,
1358 bfd *input_bfd, asection *sec,
1359 struct elf_link_hash_entry *h,
1360 Elf_Internal_Shdr *symtab_hdr,
1361 Elf_Internal_Sym *isym,
1362 reloc_howto_type *howto)
1363 {
1364 const char *v = "";
1365 const char *und = "";
1366 const char *pic = "";
1367 const char *object;
1368
1369 const char *name;
1370 if (h)
1371 {
1372 name = h->root.root.string;
1373 switch (ELF_ST_VISIBILITY (h->other))
1374 {
1375 case STV_HIDDEN:
1376 v = _("hidden symbol ");
1377 break;
1378 case STV_INTERNAL:
1379 v = _("internal symbol ");
1380 break;
1381 case STV_PROTECTED:
1382 v = _("protected symbol ");
1383 break;
1384 default:
1385 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1386 v = _("protected symbol ");
1387 else
1388 v = _("symbol ");
1389 pic = _("; recompile with -fPIC");
1390 break;
1391 }
1392
1393 if (!h->def_regular && !h->def_dynamic)
1394 und = _("undefined ");
1395 }
1396 else
1397 {
1398 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1399 pic = _("; recompile with -fPIC");
1400 }
1401
1402 if (bfd_link_dll (info))
1403 object = _("a shared object");
1404 else if (bfd_link_pie (info))
1405 object = _("a PIE object");
1406 else
1407 object = _("a PDE object");
1408
1409 /* xgettext:c-format */
1410 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1411 "not be used when making %s%s"),
1412 input_bfd, howto->name, und, v, name,
1413 object, pic);
1414 bfd_set_error (bfd_error_bad_value);
1415 sec->check_relocs_failed = 1;
1416 return FALSE;
1417 }
1418
1419 /* With the local symbol, foo, we convert
1420 mov foo@GOTPCREL(%rip), %reg
1421 to
1422 lea foo(%rip), %reg
1423 and convert
1424 call/jmp *foo@GOTPCREL(%rip)
1425 to
1426 nop call foo/jmp foo nop
1427 When PIC is false, convert
1428 test %reg, foo@GOTPCREL(%rip)
1429 to
1430 test $foo, %reg
1431 and convert
1432 binop foo@GOTPCREL(%rip), %reg
1433 to
1434 binop $foo, %reg
1435 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1436 instructions. */
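
/* A byte-level sketch of the two most common rewrites, with the
   displacements shown as zeros:
     48 8b 05 00 00 00 00    movq foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 00 00 00 00    leaq foo(%rip), %rax
   (only the opcode byte changes, 0x8b to 0x8d), and
     ff 25 00 00 00 00       jmpq *foo@GOTPCREL(%rip)
   becomes
     e9 00 00 00 00 90       jmp foo; nop
   with the relocation offset moved back one byte to cover the new
   32-bit branch displacement.  */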
1437
1438 static bfd_boolean
1439 elf_x86_64_convert_load_reloc (bfd *abfd,
1440 bfd_byte *contents,
1441 unsigned int *r_type_p,
1442 Elf_Internal_Rela *irel,
1443 struct elf_link_hash_entry *h,
1444 bfd_boolean *converted,
1445 struct bfd_link_info *link_info)
1446 {
1447 struct elf_x86_link_hash_table *htab;
1448 bfd_boolean is_pic;
1449 bfd_boolean no_overflow;
1450 bfd_boolean relocx;
1451 bfd_boolean to_reloc_pc32;
1452 asection *tsec;
1453 bfd_signed_vma raddend;
1454 unsigned int opcode;
1455 unsigned int modrm;
1456 unsigned int r_type = *r_type_p;
1457 unsigned int r_symndx;
1458 bfd_vma roff = irel->r_offset;
1459
1460 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1461 return TRUE;
1462
1463 raddend = irel->r_addend;
1464 /* Addend for 32-bit PC-relative relocation must be -4. */
1465 if (raddend != -4)
1466 return TRUE;
1467
1468 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1469 is_pic = bfd_link_pic (link_info);
1470
1471 relocx = (r_type == R_X86_64_GOTPCRELX
1472 || r_type == R_X86_64_REX_GOTPCRELX);
1473
1474 /* TRUE if --no-relax is used. */
1475 no_overflow = link_info->disable_target_specific_optimizations > 1;
1476
1477 r_symndx = htab->r_sym (irel->r_info);
1478
1479 opcode = bfd_get_8 (abfd, contents + roff - 2);
1480
 1481 /* Converting mov to lea has long been supported, even for plain GOTPCREL. */
1482 if (opcode != 0x8b)
1483 {
1484 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1485 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1486 test, xor instructions. */
1487 if (!relocx)
1488 return TRUE;
1489 }
1490
1491 /* We convert only to R_X86_64_PC32:
1492 1. Branch.
1493 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1494 3. no_overflow is true.
1495 4. PIC.
1496 */
1497 to_reloc_pc32 = (opcode == 0xff
1498 || !relocx
1499 || no_overflow
1500 || is_pic);
1501
1502 /* Get the symbol referred to by the reloc. */
1503 if (h == NULL)
1504 {
1505 Elf_Internal_Sym *isym
1506 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1507
1508 /* Skip relocation against undefined symbols. */
1509 if (isym->st_shndx == SHN_UNDEF)
1510 return TRUE;
1511
1512 if (isym->st_shndx == SHN_ABS)
1513 tsec = bfd_abs_section_ptr;
1514 else if (isym->st_shndx == SHN_COMMON)
1515 tsec = bfd_com_section_ptr;
1516 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1517 tsec = &_bfd_elf_large_com_section;
1518 else
1519 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1520 }
1521 else
1522 {
 1523 /* An undefined weak symbol is only bound locally in an executable,
 1524 and its reference is resolved as 0 without relocation
 1525 overflow. We can only perform this optimization for
 1526 GOTPCRELX relocations since we need to modify the REX byte.
 1527 It is OK to convert mov with R_X86_64_GOTPCREL to
 1528 R_X86_64_PC32. */
1529 bfd_boolean local_ref;
1530 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1531
1532 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1533 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1534 if ((relocx || opcode == 0x8b)
1535 && (h->root.type == bfd_link_hash_undefweak
1536 && !eh->linker_def
1537 && local_ref))
1538 {
1539 if (opcode == 0xff)
1540 {
1541 /* Skip for branch instructions since R_X86_64_PC32
1542 may overflow. */
1543 if (no_overflow)
1544 return TRUE;
1545 }
1546 else if (relocx)
1547 {
1548 /* For non-branch instructions, we can convert to
1549 R_X86_64_32/R_X86_64_32S since we know if there
1550 is a REX byte. */
1551 to_reloc_pc32 = FALSE;
1552 }
1553
1554 /* Since we don't know the current PC when PIC is true,
1555 we can't convert to R_X86_64_PC32. */
1556 if (to_reloc_pc32 && is_pic)
1557 return TRUE;
1558
1559 goto convert;
1560 }
 1561 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1562 ld.so may use its link-time address. */
1563 else if (h->start_stop
1564 || eh->linker_def
1565 || ((h->def_regular
1566 || h->root.type == bfd_link_hash_defined
1567 || h->root.type == bfd_link_hash_defweak)
1568 && h != htab->elf.hdynamic
1569 && local_ref))
1570 {
1571 /* bfd_link_hash_new or bfd_link_hash_undefined is
1572 set by an assignment in a linker script in
1573 bfd_elf_record_link_assignment. start_stop is set
1574 on __start_SECNAME/__stop_SECNAME which mark section
1575 SECNAME. */
1576 if (h->start_stop
1577 || eh->linker_def
1578 || (h->def_regular
1579 && (h->root.type == bfd_link_hash_new
1580 || h->root.type == bfd_link_hash_undefined
1581 || ((h->root.type == bfd_link_hash_defined
1582 || h->root.type == bfd_link_hash_defweak)
1583 && h->root.u.def.section == bfd_und_section_ptr))))
1584 {
1585 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1586 if (no_overflow)
1587 return TRUE;
1588 goto convert;
1589 }
1590 tsec = h->root.u.def.section;
1591 }
1592 else
1593 return TRUE;
1594 }
1595
 1596 /* Don't convert a GOTPCREL relocation against a large section. */
1597 if (elf_section_data (tsec) != NULL
1598 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1599 return TRUE;
1600
1601 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1602 if (no_overflow)
1603 return TRUE;
1604
1605 convert:
1606 if (opcode == 0xff)
1607 {
1608 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1609 unsigned int nop;
1610 unsigned int disp;
1611 bfd_vma nop_offset;
1612
1613 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1614 R_X86_64_PC32. */
1615 modrm = bfd_get_8 (abfd, contents + roff - 1);
1616 if (modrm == 0x25)
1617 {
1618 /* Convert to "jmp foo nop". */
1619 modrm = 0xe9;
1620 nop = NOP_OPCODE;
1621 nop_offset = irel->r_offset + 3;
1622 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1623 irel->r_offset -= 1;
1624 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1625 }
1626 else
1627 {
1628 struct elf_x86_link_hash_entry *eh
1629 = (struct elf_x86_link_hash_entry *) h;
1630
1631 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1632 is a nop prefix. */
1633 modrm = 0xe8;
1634 /* To support TLS optimization, always use addr32 prefix for
1635 "call *__tls_get_addr@GOTPCREL(%rip)". */
1636 if (eh && eh->tls_get_addr)
1637 {
1638 nop = 0x67;
1639 nop_offset = irel->r_offset - 2;
1640 }
1641 else
1642 {
1643 nop = link_info->call_nop_byte;
1644 if (link_info->call_nop_as_suffix)
1645 {
1646 nop_offset = irel->r_offset + 3;
1647 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1648 irel->r_offset -= 1;
1649 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1650 }
1651 else
1652 nop_offset = irel->r_offset - 2;
1653 }
1654 }
1655 bfd_put_8 (abfd, nop, contents + nop_offset);
1656 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1657 r_type = R_X86_64_PC32;
1658 }
1659 else
1660 {
1661 unsigned int rex;
1662 unsigned int rex_mask = REX_R;
1663
1664 if (r_type == R_X86_64_REX_GOTPCRELX)
1665 rex = bfd_get_8 (abfd, contents + roff - 3);
1666 else
1667 rex = 0;
1668
1669 if (opcode == 0x8b)
1670 {
1671 if (to_reloc_pc32)
1672 {
1673 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1674 "lea foo(%rip), %reg". */
1675 opcode = 0x8d;
1676 r_type = R_X86_64_PC32;
1677 }
1678 else
1679 {
1680 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1681 "mov $foo, %reg". */
1682 opcode = 0xc7;
1683 modrm = bfd_get_8 (abfd, contents + roff - 1);
1684 modrm = 0xc0 | (modrm & 0x38) >> 3;
1685 if ((rex & REX_W) != 0
1686 && ABI_64_P (link_info->output_bfd))
1687 {
1688 /* Keep the REX_W bit in REX byte for LP64. */
1689 r_type = R_X86_64_32S;
1690 goto rewrite_modrm_rex;
1691 }
1692 else
1693 {
1694 /* If the REX_W bit in REX byte isn't needed,
1695 use R_X86_64_32 and clear the W bit to avoid
1696 sign-extend imm32 to imm64. */
1697 r_type = R_X86_64_32;
1698 /* Clear the W bit in REX byte. */
1699 rex_mask |= REX_W;
1700 goto rewrite_modrm_rex;
1701 }
1702 }
1703 }
1704 else
1705 {
1706 /* R_X86_64_PC32 isn't supported. */
1707 if (to_reloc_pc32)
1708 return TRUE;
1709
1710 modrm = bfd_get_8 (abfd, contents + roff - 1);
1711 if (opcode == 0x85)
1712 {
1713 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1714 "test $foo, %reg". */
1715 modrm = 0xc0 | (modrm & 0x38) >> 3;
1716 opcode = 0xf7;
1717 }
1718 else
1719 {
1720 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1721 "binop $foo, %reg". */
1722 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1723 opcode = 0x81;
1724 }
1725
1726 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1727 overflow when sign-extending imm32 to imm64. */
1728 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1729
1730 rewrite_modrm_rex:
1731 bfd_put_8 (abfd, modrm, contents + roff - 1);
1732
1733 if (rex)
1734 {
1735 /* Move the R bit to the B bit in REX byte. */
1736 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1737 bfd_put_8 (abfd, rex, contents + roff - 3);
1738 }
1739
1740 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1741 irel->r_addend = 0;
1742 }
1743
1744 bfd_put_8 (abfd, opcode, contents + roff - 2);
1745 }
1746
1747 *r_type_p = r_type;
1748 irel->r_info = htab->r_info (r_symndx,
1749 r_type | R_X86_64_converted_reloc_bit);
1750
1751 *converted = TRUE;
1752
1753 return TRUE;
1754 }
1755
1756 /* Look through the relocs for a section during the first phase, and
1757 calculate needed space in the global offset table, procedure
1758 linkage table, and dynamic reloc sections. */
1759
1760 static bfd_boolean
1761 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1762 asection *sec,
1763 const Elf_Internal_Rela *relocs)
1764 {
1765 struct elf_x86_link_hash_table *htab;
1766 Elf_Internal_Shdr *symtab_hdr;
1767 struct elf_link_hash_entry **sym_hashes;
1768 const Elf_Internal_Rela *rel;
1769 const Elf_Internal_Rela *rel_end;
1770 asection *sreloc;
1771 bfd_byte *contents;
1772 bfd_boolean converted;
1773
1774 if (bfd_link_relocatable (info))
1775 return TRUE;
1776
1777 /* Don't do anything special with non-loaded, non-alloced sections.
1778 In particular, any relocs in such sections should not affect GOT
1779 and PLT reference counting (i.e. we don't allow them to create GOT
1780 or PLT entries), there's no possibility or desire to optimize TLS
1781 relocs, and there's not much point in propagating relocs to shared
1782 libs that the dynamic linker won't relocate. */
1783 if ((sec->flags & SEC_ALLOC) == 0)
1784 return TRUE;
1785
1786 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1787 if (htab == NULL)
1788 {
1789 sec->check_relocs_failed = 1;
1790 return FALSE;
1791 }
1792
1793 BFD_ASSERT (is_x86_elf (abfd, htab));
1794
1795 /* Get the section contents. */
1796 if (elf_section_data (sec)->this_hdr.contents != NULL)
1797 contents = elf_section_data (sec)->this_hdr.contents;
1798 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1799 {
1800 sec->check_relocs_failed = 1;
1801 return FALSE;
1802 }
1803
1804 symtab_hdr = &elf_symtab_hdr (abfd);
1805 sym_hashes = elf_sym_hashes (abfd);
1806
1807 converted = FALSE;
1808
1809 sreloc = NULL;
1810
1811 rel_end = relocs + sec->reloc_count;
1812 for (rel = relocs; rel < rel_end; rel++)
1813 {
1814 unsigned int r_type;
1815 unsigned int r_symndx;
1816 struct elf_link_hash_entry *h;
1817 struct elf_x86_link_hash_entry *eh;
1818 Elf_Internal_Sym *isym;
1819 const char *name;
1820 bfd_boolean size_reloc;
1821 bfd_boolean converted_reloc;
1822
1823 r_symndx = htab->r_sym (rel->r_info);
1824 r_type = ELF32_R_TYPE (rel->r_info);
1825
1826 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1827 {
1828 /* xgettext:c-format */
1829 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1830 abfd, r_symndx);
1831 goto error_return;
1832 }
1833
1834 if (r_symndx < symtab_hdr->sh_info)
1835 {
1836 /* A local symbol. */
1837 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1838 abfd, r_symndx);
1839 if (isym == NULL)
1840 goto error_return;
1841
1842 /* Check relocation against local STT_GNU_IFUNC symbol. */
1843 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1844 {
1845 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1846 TRUE);
1847 if (h == NULL)
1848 goto error_return;
1849
1850 /* Fake a STT_GNU_IFUNC symbol. */
1851 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1852 isym, NULL);
1853 h->type = STT_GNU_IFUNC;
1854 h->def_regular = 1;
1855 h->ref_regular = 1;
1856 h->forced_local = 1;
1857 h->root.type = bfd_link_hash_defined;
1858 }
1859 else
1860 h = NULL;
1861 }
1862 else
1863 {
1864 isym = NULL;
1865 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1866 while (h->root.type == bfd_link_hash_indirect
1867 || h->root.type == bfd_link_hash_warning)
1868 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1869 }
1870
1871 /* Check invalid x32 relocations. */
1872 if (!ABI_64_P (abfd))
1873 switch (r_type)
1874 {
1875 default:
1876 break;
1877
1878 case R_X86_64_DTPOFF64:
1879 case R_X86_64_TPOFF64:
1880 case R_X86_64_PC64:
1881 case R_X86_64_GOTOFF64:
1882 case R_X86_64_GOT64:
1883 case R_X86_64_GOTPCREL64:
1884 case R_X86_64_GOTPC64:
1885 case R_X86_64_GOTPLT64:
1886 case R_X86_64_PLTOFF64:
1887 {
1888 if (h)
1889 name = h->root.root.string;
1890 else
1891 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1892 NULL);
1893 _bfd_error_handler
1894 /* xgettext:c-format */
1895 (_("%pB: relocation %s against symbol `%s' isn't "
1896 "supported in x32 mode"), abfd,
1897 x86_64_elf_howto_table[r_type].name, name);
1898 bfd_set_error (bfd_error_bad_value);
1899 goto error_return;
1900 }
1901 break;
1902 }
1903
1904 if (h != NULL)
1905 {
1906 /* It is referenced by a non-shared object. */
1907 h->ref_regular = 1;
1908
1909 if (h->type == STT_GNU_IFUNC)
1910 elf_tdata (info->output_bfd)->has_gnu_symbols
1911 |= elf_gnu_symbol_ifunc;
1912 }
1913
1914 converted_reloc = FALSE;
1915 if ((r_type == R_X86_64_GOTPCREL
1916 || r_type == R_X86_64_GOTPCRELX
1917 || r_type == R_X86_64_REX_GOTPCRELX)
1918 && (h == NULL || h->type != STT_GNU_IFUNC))
1919 {
1920 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1921 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1922 irel, h, &converted_reloc,
1923 info))
1924 goto error_return;
1925
1926 if (converted_reloc)
1927 converted = TRUE;
1928 }
1929
1930 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1931 symtab_hdr, sym_hashes,
1932 &r_type, GOT_UNKNOWN,
1933 rel, rel_end, h, r_symndx, FALSE))
1934 goto error_return;
1935
1936 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1937 if (h == htab->elf.hgot)
1938 htab->got_referenced = TRUE;
1939
1940 eh = (struct elf_x86_link_hash_entry *) h;
1941 switch (r_type)
1942 {
1943 case R_X86_64_TLSLD:
1944 htab->tls_ld_or_ldm_got.refcount = 1;
1945 goto create_got;
1946
1947 case R_X86_64_TPOFF32:
1948 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1949 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1950 &x86_64_elf_howto_table[r_type]);
1951 if (eh != NULL)
1952 eh->zero_undefweak &= 0x2;
1953 break;
1954
1955 case R_X86_64_GOTTPOFF:
1956 if (!bfd_link_executable (info))
1957 info->flags |= DF_STATIC_TLS;
1958 /* Fall through */
1959
1960 case R_X86_64_GOT32:
1961 case R_X86_64_GOTPCREL:
1962 case R_X86_64_GOTPCRELX:
1963 case R_X86_64_REX_GOTPCRELX:
1964 case R_X86_64_TLSGD:
1965 case R_X86_64_GOT64:
1966 case R_X86_64_GOTPCREL64:
1967 case R_X86_64_GOTPLT64:
1968 case R_X86_64_GOTPC32_TLSDESC:
1969 case R_X86_64_TLSDESC_CALL:
1970 /* This symbol requires a global offset table entry. */
1971 {
1972 int tls_type, old_tls_type;
1973
1974 switch (r_type)
1975 {
1976 default: tls_type = GOT_NORMAL; break;
1977 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1978 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1979 case R_X86_64_GOTPC32_TLSDESC:
1980 case R_X86_64_TLSDESC_CALL:
1981 tls_type = GOT_TLS_GDESC; break;
1982 }
1983
1984 if (h != NULL)
1985 {
1986 h->got.refcount = 1;
1987 old_tls_type = eh->tls_type;
1988 }
1989 else
1990 {
1991 bfd_signed_vma *local_got_refcounts;
1992
1993 /* This is a global offset table entry for a local symbol. */
1994 local_got_refcounts = elf_local_got_refcounts (abfd);
1995 if (local_got_refcounts == NULL)
1996 {
1997 bfd_size_type size;
1998
1999 size = symtab_hdr->sh_info;
2000 size *= sizeof (bfd_signed_vma)
2001 + sizeof (bfd_vma) + sizeof (char);
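/* One allocation holds three parallel per-local-symbol arrays:
   sh_info GOT reference counts (bfd_signed_vma), followed by
   sh_info TLSDESC GOT offsets (bfd_vma), followed by sh_info
   TLS type bytes; the pointers set up below index into it.  */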
2002 local_got_refcounts = ((bfd_signed_vma *)
2003 bfd_zalloc (abfd, size));
2004 if (local_got_refcounts == NULL)
2005 goto error_return;
2006 elf_local_got_refcounts (abfd) = local_got_refcounts;
2007 elf_x86_local_tlsdesc_gotent (abfd)
2008 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2009 elf_x86_local_got_tls_type (abfd)
2010 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2011 }
2012 local_got_refcounts[r_symndx] = 1;
2013 old_tls_type
2014 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2015 }
2016
2017 /* If a TLS symbol is accessed using IE at least once,
2018 there is no point in using the dynamic model for it.  */
2019 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2020 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2021 || tls_type != GOT_TLS_IE))
2022 {
2023 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2024 tls_type = old_tls_type;
2025 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2026 && GOT_TLS_GD_ANY_P (tls_type))
2027 tls_type |= old_tls_type;
2028 else
2029 {
2030 if (h)
2031 name = h->root.root.string;
2032 else
2033 name = bfd_elf_sym_name (abfd, symtab_hdr,
2034 isym, NULL);
2035 _bfd_error_handler
2036 /* xgettext:c-format */
2037 (_("%pB: '%s' accessed both as normal and"
2038 " thread local symbol"),
2039 abfd, name);
2040 bfd_set_error (bfd_error_bad_value);
2041 goto error_return;
2042 }
2043 }
2044
2045 if (old_tls_type != tls_type)
2046 {
2047 if (eh != NULL)
2048 eh->tls_type = tls_type;
2049 else
2050 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2051 }
2052 }
2053 /* Fall through */
2054
2055 case R_X86_64_GOTOFF64:
2056 case R_X86_64_GOTPC32:
2057 case R_X86_64_GOTPC64:
2058 create_got:
2059 if (eh != NULL)
2060 eh->zero_undefweak &= 0x2;
2061 break;
2062
2063 case R_X86_64_PLT32:
2064 case R_X86_64_PLT32_BND:
2065 /* This symbol requires a procedure linkage table entry. We
2066 actually build the entry in adjust_dynamic_symbol,
2067 because this might be a case of linking PIC code which is
2068 never referenced by a dynamic object, in which case we
2069 don't need to generate a procedure linkage table entry
2070 after all. */
2071
2072 /* If this is a local symbol, we resolve it directly without
2073 creating a procedure linkage table entry. */
2074 if (h == NULL)
2075 continue;
2076
2077 eh->zero_undefweak &= 0x2;
2078 h->needs_plt = 1;
2079 h->plt.refcount = 1;
2080 break;
2081
2082 case R_X86_64_PLTOFF64:
2083 /* This tries to form the 'address' of a function relative
2084 to GOT. For global symbols we need a PLT entry. */
2085 if (h != NULL)
2086 {
2087 h->needs_plt = 1;
2088 h->plt.refcount = 1;
2089 }
2090 goto create_got;
2091
2092 case R_X86_64_SIZE32:
2093 case R_X86_64_SIZE64:
2094 size_reloc = TRUE;
2095 goto do_size;
2096
2097 case R_X86_64_32:
2098 if (!ABI_64_P (abfd))
2099 goto pointer;
2100 /* Fall through. */
2101 case R_X86_64_8:
2102 case R_X86_64_16:
2103 case R_X86_64_32S:
2104 /* Check relocation overflow as these relocs may lead to
2105 run-time relocation overflow. Don't error out for
2106 sections we don't care about, such as debug sections or
2107 when relocation overflow check is disabled. */
2108 if (!info->no_reloc_overflow_check
2109 && !converted_reloc
2110 && (bfd_link_pic (info)
2111 || (bfd_link_executable (info)
2112 && h != NULL
2113 && !h->def_regular
2114 && h->def_dynamic
2115 && (sec->flags & SEC_READONLY) == 0)))
2116 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2117 &x86_64_elf_howto_table[r_type]);
2118 /* Fall through. */
2119
2120 case R_X86_64_PC8:
2121 case R_X86_64_PC16:
2122 case R_X86_64_PC32:
2123 case R_X86_64_PC32_BND:
2124 case R_X86_64_PC64:
2125 case R_X86_64_64:
2126 pointer:
2127 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2128 eh->zero_undefweak |= 0x2;
2129 /* We are called after all symbols have been resolved. Only
2130 relocations against STT_GNU_IFUNC symbols must go through
2131 the PLT.  */
2132 if (h != NULL
2133 && (bfd_link_executable (info)
2134 || h->type == STT_GNU_IFUNC))
2135 {
2136 bfd_boolean func_pointer_ref = FALSE;
2137
2138 if (r_type == R_X86_64_PC32)
2139 {
2140 /* Since something like ".long foo - ." may be used
2141 as a pointer, make sure that the PLT is used if foo is
2142 a function defined in a shared library. */
2143 if ((sec->flags & SEC_CODE) == 0)
2144 {
2145 h->pointer_equality_needed = 1;
2146 if (bfd_link_pie (info)
2147 && h->type == STT_FUNC
2148 && !h->def_regular
2149 && h->def_dynamic)
2150 {
2151 h->needs_plt = 1;
2152 h->plt.refcount = 1;
2153 }
2154 }
2155 }
2156 else if (r_type != R_X86_64_PC32_BND
2157 && r_type != R_X86_64_PC64)
2158 {
2159 h->pointer_equality_needed = 1;
2160 /* At run-time, R_X86_64_64 can be resolved for both
2161 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2162 can only be resolved for x32. */
2163 if ((sec->flags & SEC_READONLY) == 0
2164 && (r_type == R_X86_64_64
2165 || (!ABI_64_P (abfd)
2166 && (r_type == R_X86_64_32
2167 || r_type == R_X86_64_32S))))
2168 func_pointer_ref = TRUE;
2169 }
2170
2171 if (!func_pointer_ref)
2172 {
2173 /* If this reloc is in a read-only section, we might
2174 need a copy reloc. We can't check reliably at this
2175 stage whether the section is read-only, as input
2176 sections have not yet been mapped to output sections.
2177 Tentatively set the flag for now, and correct in
2178 adjust_dynamic_symbol. */
2179 h->non_got_ref = 1;
2180
2181 /* We may need a .plt entry if the symbol is a function
2182 defined in a shared lib or is a function referenced
2183 from code or a read-only section.  */
2184 if (!h->def_regular
2185 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2186 h->plt.refcount = 1;
2187 }
2188 }
2189
2190 size_reloc = FALSE;
2191 do_size:
2192 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2193 htab->pointer_r_type))
2194 {
2195 struct elf_dyn_relocs *p;
2196 struct elf_dyn_relocs **head;
2197
2198 /* We must copy these reloc types into the output file.
2199 Create a reloc section in dynobj and make room for
2200 this reloc. */
2201 if (sreloc == NULL)
2202 {
2203 sreloc = _bfd_elf_make_dynamic_reloc_section
2204 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2205 abfd, /*rela?*/ TRUE);
2206
2207 if (sreloc == NULL)
2208 goto error_return;
2209 }
2210
2211 /* If this is a global symbol, we count the number of
2212 relocations we need for this symbol. */
2213 if (h != NULL)
2214 head = &eh->dyn_relocs;
2215 else
2216 {
2217 /* Track dynamic relocs needed for local syms too.
2218 We really need local syms available to do this
2219 easily. Oh well. */
2220 asection *s;
2221 void **vpp;
2222
2223 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2224 abfd, r_symndx);
2225 if (isym == NULL)
2226 goto error_return;
2227
2228 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2229 if (s == NULL)
2230 s = sec;
2231
2232 /* Beware of type punned pointers vs strict aliasing
2233 rules. */
2234 vpp = &(elf_section_data (s)->local_dynrel);
2235 head = (struct elf_dyn_relocs **)vpp;
2236 }
2237
2238 p = *head;
2239 if (p == NULL || p->sec != sec)
2240 {
2241 bfd_size_type amt = sizeof *p;
2242
2243 p = ((struct elf_dyn_relocs *)
2244 bfd_alloc (htab->elf.dynobj, amt));
2245 if (p == NULL)
2246 goto error_return;
2247 p->next = *head;
2248 *head = p;
2249 p->sec = sec;
2250 p->count = 0;
2251 p->pc_count = 0;
2252 }
2253
2254 p->count += 1;
2255 /* Count size relocation as PC-relative relocation. */
2256 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2257 p->pc_count += 1;
2258 }
2259 break;
2260
2261 /* This relocation describes the C++ object vtable hierarchy.
2262 Reconstruct it for later use during GC. */
2263 case R_X86_64_GNU_VTINHERIT:
2264 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2265 goto error_return;
2266 break;
2267
2268 /* This relocation describes which C++ vtable entries are actually
2269 used. Record for later use during GC. */
2270 case R_X86_64_GNU_VTENTRY:
2271 BFD_ASSERT (h != NULL);
2272 if (h != NULL
2273 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2274 goto error_return;
2275 break;
2276
2277 default:
2278 break;
2279 }
2280 }
2281
2282 if (elf_section_data (sec)->this_hdr.contents != contents)
2283 {
2284 if (!converted && !info->keep_memory)
2285 free (contents);
2286 else
2287 {
2288 /* Cache the section contents for elf_link_input_bfd if any
2289 load is converted or --no-keep-memory isn't used. */
2290 elf_section_data (sec)->this_hdr.contents = contents;
2291 }
2292 }
2293
2294 /* Cache relocations if any load is converted. */
2295 if (elf_section_data (sec)->relocs != relocs && converted)
2296 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2297
2298 return TRUE;
2299
2300 error_return:
2301 if (elf_section_data (sec)->this_hdr.contents != contents)
2302 free (contents);
2303 sec->check_relocs_failed = 1;
2304 return FALSE;
2305 }
2306
2307 /* Return the relocation value for @tpoff relocation
2308 if STT_TLS virtual address is ADDRESS. */
2309
2310 static bfd_vma
2311 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2312 {
2313 struct elf_link_hash_table *htab = elf_hash_table (info);
2314 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2315 bfd_vma static_tls_size;
2316
2317 /* If tls_sec is NULL, we should have signalled an error already.  */
2318 if (htab->tls_sec == NULL)
2319 return 0;
2320
2321 /* Consider special static TLS alignment requirements. */
2322 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
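/* x86-64 uses TLS variant II: the thread pointer sits just past
   the end of the static TLS block, so an @tpoff value is the
   (negative) distance from that point, i.e. the symbol address
   minus the aligned end of the TLS segment.  */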
2323 return address - static_tls_size - htab->tls_sec->vma;
2324 }
2325
2326 /* Relocate an x86_64 ELF section. */
2327
2328 static bfd_boolean
2329 elf_x86_64_relocate_section (bfd *output_bfd,
2330 struct bfd_link_info *info,
2331 bfd *input_bfd,
2332 asection *input_section,
2333 bfd_byte *contents,
2334 Elf_Internal_Rela *relocs,
2335 Elf_Internal_Sym *local_syms,
2336 asection **local_sections)
2337 {
2338 struct elf_x86_link_hash_table *htab;
2339 Elf_Internal_Shdr *symtab_hdr;
2340 struct elf_link_hash_entry **sym_hashes;
2341 bfd_vma *local_got_offsets;
2342 bfd_vma *local_tlsdesc_gotents;
2343 Elf_Internal_Rela *rel;
2344 Elf_Internal_Rela *wrel;
2345 Elf_Internal_Rela *relend;
2346 unsigned int plt_entry_size;
2347
2348 /* Skip if check_relocs failed. */
2349 if (input_section->check_relocs_failed)
2350 return FALSE;
2351
2352 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2353 if (htab == NULL)
2354 return FALSE;
2355
2356 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2357
2358 plt_entry_size = htab->plt.plt_entry_size;
2359 symtab_hdr = &elf_symtab_hdr (input_bfd);
2360 sym_hashes = elf_sym_hashes (input_bfd);
2361 local_got_offsets = elf_local_got_offsets (input_bfd);
2362 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2363
2364 _bfd_x86_elf_set_tls_module_base (info);
2365
2366 rel = wrel = relocs;
2367 relend = relocs + input_section->reloc_count;
2368 for (; rel < relend; wrel++, rel++)
2369 {
2370 unsigned int r_type, r_type_tls;
2371 reloc_howto_type *howto;
2372 unsigned long r_symndx;
2373 struct elf_link_hash_entry *h;
2374 struct elf_x86_link_hash_entry *eh;
2375 Elf_Internal_Sym *sym;
2376 asection *sec;
2377 bfd_vma off, offplt, plt_offset;
2378 bfd_vma relocation;
2379 bfd_boolean unresolved_reloc;
2380 bfd_reloc_status_type r;
2381 int tls_type;
2382 asection *base_got, *resolved_plt;
2383 bfd_vma st_size;
2384 bfd_boolean resolved_to_zero;
2385 bfd_boolean relative_reloc;
2386 bfd_boolean converted_reloc;
2387 bfd_boolean need_copy_reloc_in_pie;
2388
2389 r_type = ELF32_R_TYPE (rel->r_info);
2390 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2391 || r_type == (int) R_X86_64_GNU_VTENTRY)
2392 {
2393 if (wrel != rel)
2394 *wrel = *rel;
2395 continue;
2396 }
2397
2398 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2399 r_type &= ~R_X86_64_converted_reloc_bit;
2400
2401 if (r_type >= (int) R_X86_64_standard)
2402 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2403
2404 if (r_type != (int) R_X86_64_32
2405 || ABI_64_P (output_bfd))
2406 howto = x86_64_elf_howto_table + r_type;
2407 else
2408 howto = (x86_64_elf_howto_table
2409 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2410 r_symndx = htab->r_sym (rel->r_info);
2411 h = NULL;
2412 sym = NULL;
2413 sec = NULL;
2414 unresolved_reloc = FALSE;
2415 if (r_symndx < symtab_hdr->sh_info)
2416 {
2417 sym = local_syms + r_symndx;
2418 sec = local_sections[r_symndx];
2419
2420 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2421 &sec, rel);
2422 st_size = sym->st_size;
2423
2424 /* Relocate against local STT_GNU_IFUNC symbol. */
2425 if (!bfd_link_relocatable (info)
2426 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2427 {
2428 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2429 rel, FALSE);
2430 if (h == NULL)
2431 abort ();
2432
2433 /* Set STT_GNU_IFUNC symbol value. */
2434 h->root.u.def.value = sym->st_value;
2435 h->root.u.def.section = sec;
2436 }
2437 }
2438 else
2439 {
2440 bfd_boolean warned ATTRIBUTE_UNUSED;
2441 bfd_boolean ignored ATTRIBUTE_UNUSED;
2442
2443 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2444 r_symndx, symtab_hdr, sym_hashes,
2445 h, sec, relocation,
2446 unresolved_reloc, warned, ignored);
2447 st_size = h->size;
2448 }
2449
2450 if (sec != NULL && discarded_section (sec))
2451 {
2452 _bfd_clear_contents (howto, input_bfd, input_section,
2453 contents + rel->r_offset);
2454 wrel->r_offset = rel->r_offset;
2455 wrel->r_info = 0;
2456 wrel->r_addend = 0;
2457
2458 /* For ld -r, remove relocations in debug sections against
2459 sections defined in discarded sections.  Not done for
2460 eh_frame, whose editing code expects them to be present.  */
2461 if (bfd_link_relocatable (info)
2462 && (input_section->flags & SEC_DEBUGGING))
2463 wrel--;
2464
2465 continue;
2466 }
2467
2468 if (bfd_link_relocatable (info))
2469 {
2470 if (wrel != rel)
2471 *wrel = *rel;
2472 continue;
2473 }
2474
2475 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2476 {
2477 if (r_type == R_X86_64_64)
2478 {
2479 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2480 zero-extend it to 64bit if addend is zero. */
2481 r_type = R_X86_64_32;
2482 memset (contents + rel->r_offset + 4, 0, 4);
2483 }
2484 else if (r_type == R_X86_64_SIZE64)
2485 {
2486 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2487 zero-extend it to 64bit if addend is zero. */
2488 r_type = R_X86_64_SIZE32;
2489 memset (contents + rel->r_offset + 4, 0, 4);
2490 }
2491 }
2492
2493 eh = (struct elf_x86_link_hash_entry *) h;
2494
2495 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
2496 it here if it is defined in a non-shared object. */
2497 if (h != NULL
2498 && h->type == STT_GNU_IFUNC
2499 && h->def_regular)
2500 {
2501 bfd_vma plt_index;
2502 const char *name;
2503
2504 if ((input_section->flags & SEC_ALLOC) == 0)
2505 {
2506 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2507 STT_GNU_IFUNC symbol as STT_FUNC. */
2508 if (elf_section_type (input_section) == SHT_NOTE)
2509 goto skip_ifunc;
2510 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2511 sections because such sections are not SEC_ALLOC and
2512 thus ld.so will not process them. */
2513 if ((input_section->flags & SEC_DEBUGGING) != 0)
2514 continue;
2515 abort ();
2516 }
2517
2518 switch (r_type)
2519 {
2520 default:
2521 break;
2522
2523 case R_X86_64_GOTPCREL:
2524 case R_X86_64_GOTPCRELX:
2525 case R_X86_64_REX_GOTPCRELX:
2526 case R_X86_64_GOTPCREL64:
2527 base_got = htab->elf.sgot;
2528 off = h->got.offset;
2529
2530 if (base_got == NULL)
2531 abort ();
2532
2533 if (off == (bfd_vma) -1)
2534 {
2535 /* We can't use h->got.offset here to save state, or
2536 even just remember the offset, as finish_dynamic_symbol
2537 would use that as offset into .got. */
2538
2539 if (h->plt.offset == (bfd_vma) -1)
2540 abort ();
2541
2542 if (htab->elf.splt != NULL)
2543 {
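/* The first three .got.plt slots are reserved (GOT[0] holds
   the address of _DYNAMIC, GOT[1] and GOT[2] are filled in by
   the dynamic linker), so PLT entry N uses .got.plt slot
   N + 3.  */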
2544 plt_index = (h->plt.offset / plt_entry_size
2545 - htab->plt.has_plt0);
2546 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2547 base_got = htab->elf.sgotplt;
2548 }
2549 else
2550 {
2551 plt_index = h->plt.offset / plt_entry_size;
2552 off = plt_index * GOT_ENTRY_SIZE;
2553 base_got = htab->elf.igotplt;
2554 }
2555
2556 if (h->dynindx == -1
2557 || h->forced_local
2558 || info->symbolic)
2559 {
2560 /* This references the local definition.  We must
2561 initialize this entry in the global offset table.
2562 Since the offset must always be a multiple of 8,
2563 we use the least significant bit to record
2564 whether we have initialized it already.
2565
2566 When doing a dynamic link, we create a .rela.got
2567 relocation entry to initialize the value. This
2568 is done in the finish_dynamic_symbol routine. */
2569 if ((off & 1) != 0)
2570 off &= ~1;
2571 else
2572 {
2573 bfd_put_64 (output_bfd, relocation,
2574 base_got->contents + off);
2575 /* Note that this is harmless for the GOTPLT64
2576 case, as -1 | 1 still is -1. */
2577 h->got.offset |= 1;
2578 }
2579 }
2580 }
2581
2582 relocation = (base_got->output_section->vma
2583 + base_got->output_offset + off);
2584
2585 goto do_relocation;
2586 }
2587
2588 if (h->plt.offset == (bfd_vma) -1)
2589 {
2590 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2591 if (r_type == htab->pointer_r_type
2592 && (input_section->flags & SEC_CODE) == 0)
2593 goto do_ifunc_pointer;
2594 goto bad_ifunc_reloc;
2595 }
2596
2597 /* STT_GNU_IFUNC symbol must go through PLT. */
2598 if (htab->elf.splt != NULL)
2599 {
2600 if (htab->plt_second != NULL)
2601 {
2602 resolved_plt = htab->plt_second;
2603 plt_offset = eh->plt_second.offset;
2604 }
2605 else
2606 {
2607 resolved_plt = htab->elf.splt;
2608 plt_offset = h->plt.offset;
2609 }
2610 }
2611 else
2612 {
2613 resolved_plt = htab->elf.iplt;
2614 plt_offset = h->plt.offset;
2615 }
2616
2617 relocation = (resolved_plt->output_section->vma
2618 + resolved_plt->output_offset + plt_offset);
2619
2620 switch (r_type)
2621 {
2622 default:
2623 bad_ifunc_reloc:
2624 if (h->root.root.string)
2625 name = h->root.root.string;
2626 else
2627 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2628 NULL);
2629 _bfd_error_handler
2630 /* xgettext:c-format */
2631 (_("%pB: relocation %s against STT_GNU_IFUNC "
2632 "symbol `%s' isn't supported"), input_bfd,
2633 howto->name, name);
2634 bfd_set_error (bfd_error_bad_value);
2635 return FALSE;
2636
2637 case R_X86_64_32S:
2638 if (bfd_link_pic (info))
2639 abort ();
2640 goto do_relocation;
2641
2642 case R_X86_64_32:
2643 if (ABI_64_P (output_bfd))
2644 goto do_relocation;
2645 /* FALLTHROUGH */
2646 case R_X86_64_64:
2647 do_ifunc_pointer:
2648 if (rel->r_addend != 0)
2649 {
2650 if (h->root.root.string)
2651 name = h->root.root.string;
2652 else
2653 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2654 sym, NULL);
2655 _bfd_error_handler
2656 /* xgettext:c-format */
2657 (_("%pB: relocation %s against STT_GNU_IFUNC "
2658 "symbol `%s' has non-zero addend: %" PRId64),
2659 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2660 bfd_set_error (bfd_error_bad_value);
2661 return FALSE;
2662 }
2663
2664 /* Generate a dynamic relocation only when there is a
2665 non-GOT reference in a shared object or there is no
2666 PLT. */
2667 if ((bfd_link_pic (info) && h->non_got_ref)
2668 || h->plt.offset == (bfd_vma) -1)
2669 {
2670 Elf_Internal_Rela outrel;
2671 asection *sreloc;
2672
2673 /* Need a dynamic relocation to get the real function
2674 address. */
2675 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2676 info,
2677 input_section,
2678 rel->r_offset);
2679 if (outrel.r_offset == (bfd_vma) -1
2680 || outrel.r_offset == (bfd_vma) -2)
2681 abort ();
2682
2683 outrel.r_offset += (input_section->output_section->vma
2684 + input_section->output_offset);
2685
2686 if (POINTER_LOCAL_IFUNC_P (info, h))
2687 {
2688 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2689 h->root.root.string,
2690 h->root.u.def.section->owner);
2691
2692 /* This symbol is resolved locally. */
2693 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2694 outrel.r_addend = (h->root.u.def.value
2695 + h->root.u.def.section->output_section->vma
2696 + h->root.u.def.section->output_offset);
2697 }
2698 else
2699 {
2700 outrel.r_info = htab->r_info (h->dynindx, r_type);
2701 outrel.r_addend = 0;
2702 }
2703
2704 /* Dynamic relocations are stored in
2705 1. .rela.ifunc section in PIC object.
2706 2. .rela.got section in dynamic executable.
2707 3. .rela.iplt section in static executable. */
2708 if (bfd_link_pic (info))
2709 sreloc = htab->elf.irelifunc;
2710 else if (htab->elf.splt != NULL)
2711 sreloc = htab->elf.srelgot;
2712 else
2713 sreloc = htab->elf.irelplt;
2714 elf_append_rela (output_bfd, sreloc, &outrel);
2715
2716 /* If this reloc is against an external symbol, we
2717 do not want to fiddle with the addend. Otherwise,
2718 we need to include the symbol value so that it
2719 becomes an addend for the dynamic reloc. For an
2720 internal symbol, we have updated addend. */
2721 continue;
2722 }
2723 /* FALLTHROUGH */
2724 case R_X86_64_PC32:
2725 case R_X86_64_PC32_BND:
2726 case R_X86_64_PC64:
2727 case R_X86_64_PLT32:
2728 case R_X86_64_PLT32_BND:
2729 goto do_relocation;
2730 }
2731 }
2732
2733 skip_ifunc:
2734 resolved_to_zero = (eh != NULL
2735 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2736
2737 /* When generating a shared object, the relocations handled here are
2738 copied into the output file to be resolved at run time. */
2739 switch (r_type)
2740 {
2741 case R_X86_64_GOT32:
2742 case R_X86_64_GOT64:
2743 /* Relocation is to the entry for this symbol in the global
2744 offset table. */
2745 case R_X86_64_GOTPCREL:
2746 case R_X86_64_GOTPCRELX:
2747 case R_X86_64_REX_GOTPCRELX:
2748 case R_X86_64_GOTPCREL64:
2749 /* Use global offset table entry as symbol value. */
2750 case R_X86_64_GOTPLT64:
2751 /* This is obsolete and treated the same as GOT64. */
2752 base_got = htab->elf.sgot;
2753
2754 if (htab->elf.sgot == NULL)
2755 abort ();
2756
2757 relative_reloc = FALSE;
2758 if (h != NULL)
2759 {
2760 off = h->got.offset;
2761 if (h->needs_plt
2762 && h->plt.offset != (bfd_vma)-1
2763 && off == (bfd_vma)-1)
2764 {
2765 /* We can't use h->got.offset here to save
2766 state, or even just remember the offset, as
2767 finish_dynamic_symbol would use that as offset into
2768 .got. */
2769 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2770 - htab->plt.has_plt0);
2771 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2772 base_got = htab->elf.sgotplt;
2773 }
2774
2775 if (RESOLVED_LOCALLY_P (info, h, htab))
2776 {
2777 /* We must initialize this entry in the global offset
2778 table. Since the offset must always be a multiple
2779 of 8, we use the least significant bit to record
2780 whether we have initialized it already.
2781
2782 When doing a dynamic link, we create a .rela.got
2783 relocation entry to initialize the value. This is
2784 done in the finish_dynamic_symbol routine. */
2785 if ((off & 1) != 0)
2786 off &= ~1;
2787 else
2788 {
2789 bfd_put_64 (output_bfd, relocation,
2790 base_got->contents + off);
2791 /* Note that this is harmless for the GOTPLT64 case,
2792 as -1 | 1 still is -1. */
2793 h->got.offset |= 1;
2794
2795 if (GENERATE_RELATIVE_RELOC_P (info, h))
2796 {
2797 /* If this symbol isn't dynamic in PIC,
2798 generate R_X86_64_RELATIVE here. */
2799 eh->no_finish_dynamic_symbol = 1;
2800 relative_reloc = TRUE;
2801 }
2802 }
2803 }
2804 else
2805 unresolved_reloc = FALSE;
2806 }
2807 else
2808 {
2809 if (local_got_offsets == NULL)
2810 abort ();
2811
2812 off = local_got_offsets[r_symndx];
2813
2814 /* The offset must always be a multiple of 8. We use
2815 the least significant bit to record whether we have
2816 already generated the necessary reloc. */
2817 if ((off & 1) != 0)
2818 off &= ~1;
2819 else
2820 {
2821 bfd_put_64 (output_bfd, relocation,
2822 base_got->contents + off);
2823 local_got_offsets[r_symndx] |= 1;
2824
2825 if (bfd_link_pic (info))
2826 relative_reloc = TRUE;
2827 }
2828 }
2829
2830 if (relative_reloc)
2831 {
2832 asection *s;
2833 Elf_Internal_Rela outrel;
2834
2835 /* We need to generate a R_X86_64_RELATIVE reloc
2836 for the dynamic linker. */
2837 s = htab->elf.srelgot;
2838 if (s == NULL)
2839 abort ();
2840
2841 outrel.r_offset = (base_got->output_section->vma
2842 + base_got->output_offset
2843 + off);
2844 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2845 outrel.r_addend = relocation;
2846 elf_append_rela (output_bfd, s, &outrel);
2847 }
2848
2849 if (off >= (bfd_vma) -2)
2850 abort ();
2851
2852 relocation = base_got->output_section->vma
2853 + base_got->output_offset + off;
2854 if (r_type != R_X86_64_GOTPCREL
2855 && r_type != R_X86_64_GOTPCRELX
2856 && r_type != R_X86_64_REX_GOTPCRELX
2857 && r_type != R_X86_64_GOTPCREL64)
2858 relocation -= htab->elf.sgotplt->output_section->vma
2859 - htab->elf.sgotplt->output_offset;
2860
2861 break;
2862
2863 case R_X86_64_GOTOFF64:
2864 /* Relocation is relative to the start of the global offset
2865 table. */
2866
2867 /* Check that this isn't a protected function or data symbol
2868 for a shared library, since it may not be local when used
2869 as a function address or with a copy relocation.  We also
2870 need to make sure that the symbol is referenced locally.  */
2871 if (bfd_link_pic (info) && h)
2872 {
2873 if (!h->def_regular)
2874 {
2875 const char *v;
2876
2877 switch (ELF_ST_VISIBILITY (h->other))
2878 {
2879 case STV_HIDDEN:
2880 v = _("hidden symbol");
2881 break;
2882 case STV_INTERNAL:
2883 v = _("internal symbol");
2884 break;
2885 case STV_PROTECTED:
2886 v = _("protected symbol");
2887 break;
2888 default:
2889 v = _("symbol");
2890 break;
2891 }
2892
2893 _bfd_error_handler
2894 /* xgettext:c-format */
2895 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2896 " `%s' can not be used when making a shared object"),
2897 input_bfd, v, h->root.root.string);
2898 bfd_set_error (bfd_error_bad_value);
2899 return FALSE;
2900 }
2901 else if (!bfd_link_executable (info)
2902 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2903 && (h->type == STT_FUNC
2904 || h->type == STT_OBJECT)
2905 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2906 {
2907 _bfd_error_handler
2908 /* xgettext:c-format */
2909 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2910 " `%s' can not be used when making a shared object"),
2911 input_bfd,
2912 h->type == STT_FUNC ? "function" : "data",
2913 h->root.root.string);
2914 bfd_set_error (bfd_error_bad_value);
2915 return FALSE;
2916 }
2917 }
2918
2919 /* Note that sgot is not involved in this
2920 calculation. We always want the start of .got.plt. If we
2921 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2922 permitted by the ABI, we might have to change this
2923 calculation. */
2924 relocation -= htab->elf.sgotplt->output_section->vma
2925 + htab->elf.sgotplt->output_offset;
2926 break;
2927
2928 case R_X86_64_GOTPC32:
2929 case R_X86_64_GOTPC64:
2930 /* Use global offset table as symbol value. */
2931 relocation = htab->elf.sgotplt->output_section->vma
2932 + htab->elf.sgotplt->output_offset;
2933 unresolved_reloc = FALSE;
2934 break;
2935
2936 case R_X86_64_PLTOFF64:
2937 /* Relocation is PLT entry relative to GOT. For local
2938 symbols it's the symbol itself relative to GOT. */
2939 if (h != NULL
2940 /* See PLT32 handling. */
2941 && (h->plt.offset != (bfd_vma) -1
2942 || eh->plt_got.offset != (bfd_vma) -1)
2943 && htab->elf.splt != NULL)
2944 {
2945 if (eh->plt_got.offset != (bfd_vma) -1)
2946 {
2947 /* Use the GOT PLT. */
2948 resolved_plt = htab->plt_got;
2949 plt_offset = eh->plt_got.offset;
2950 }
2951 else if (htab->plt_second != NULL)
2952 {
2953 resolved_plt = htab->plt_second;
2954 plt_offset = eh->plt_second.offset;
2955 }
2956 else
2957 {
2958 resolved_plt = htab->elf.splt;
2959 plt_offset = h->plt.offset;
2960 }
2961
2962 relocation = (resolved_plt->output_section->vma
2963 + resolved_plt->output_offset
2964 + plt_offset);
2965 unresolved_reloc = FALSE;
2966 }
2967
2968 relocation -= htab->elf.sgotplt->output_section->vma
2969 + htab->elf.sgotplt->output_offset;
2970 break;
2971
2972 case R_X86_64_PLT32:
2973 case R_X86_64_PLT32_BND:
2974 /* Relocation is to the entry for this symbol in the
2975 procedure linkage table. */
2976
2977 /* Resolve a PLT32 reloc against a local symbol directly,
2978 without using the procedure linkage table. */
2979 if (h == NULL)
2980 break;
2981
2982 if ((h->plt.offset == (bfd_vma) -1
2983 && eh->plt_got.offset == (bfd_vma) -1)
2984 || htab->elf.splt == NULL)
2985 {
2986 /* We didn't make a PLT entry for this symbol. This
2987 happens when statically linking PIC code, or when
2988 using -Bsymbolic. */
2989 break;
2990 }
2991
2992 use_plt:
2993 if (h->plt.offset != (bfd_vma) -1)
2994 {
2995 if (htab->plt_second != NULL)
2996 {
2997 resolved_plt = htab->plt_second;
2998 plt_offset = eh->plt_second.offset;
2999 }
3000 else
3001 {
3002 resolved_plt = htab->elf.splt;
3003 plt_offset = h->plt.offset;
3004 }
3005 }
3006 else
3007 {
3008 /* Use the GOT PLT. */
3009 resolved_plt = htab->plt_got;
3010 plt_offset = eh->plt_got.offset;
3011 }
3012
3013 relocation = (resolved_plt->output_section->vma
3014 + resolved_plt->output_offset
3015 + plt_offset);
3016 unresolved_reloc = FALSE;
3017 break;
3018
3019 case R_X86_64_SIZE32:
3020 case R_X86_64_SIZE64:
3021 /* Set to symbol size. */
3022 relocation = st_size;
3023 goto direct;
3024
3025 case R_X86_64_PC8:
3026 case R_X86_64_PC16:
3027 case R_X86_64_PC32:
3028 case R_X86_64_PC32_BND:
3029 /* Don't complain about -fPIC if the symbol is undefined when
3030 building an executable, unless it is an unresolved weak symbol,
3031 references a dynamic definition in a PIE, or -z nocopyreloc
3032 is used.  */
3033 if ((input_section->flags & SEC_ALLOC) != 0
3034 && (input_section->flags & SEC_READONLY) != 0
3035 && h != NULL
3036 && ((bfd_link_executable (info)
3037 && ((h->root.type == bfd_link_hash_undefweak
3038 && !resolved_to_zero)
3039 || (bfd_link_pie (info)
3040 && !h->def_regular
3041 && h->def_dynamic)
3042 || ((info->nocopyreloc
3043 || (eh->def_protected
3044 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3045 && h->def_dynamic
3046 && !(h->root.u.def.section->flags & SEC_CODE))))
3047 || bfd_link_dll (info)))
3048 {
3049 bfd_boolean fail = FALSE;
3050 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3051 {
3052 /* Symbol is referenced locally. Make sure it is
3053 defined locally. */
3054 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3055 }
3056 else if (!(bfd_link_pie (info)
3057 && (h->needs_copy || eh->needs_copy)))
3058 {
3059 /* Symbol doesn't need copy reloc and isn't referenced
3060 locally. Address of protected function may not be
3061 reachable at run-time. */
3062 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3063 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3064 && h->type == STT_FUNC));
3065 }
3066
3067 if (fail)
3068 return elf_x86_64_need_pic (info, input_bfd, input_section,
3069 h, NULL, NULL, howto);
3070 }
3071 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3072 as function address. */
3073 else if (h != NULL
3074 && (input_section->flags & SEC_CODE) == 0
3075 && bfd_link_pie (info)
3076 && h->type == STT_FUNC
3077 && !h->def_regular
3078 && h->def_dynamic)
3079 goto use_plt;
3080 /* Fall through. */
3081
3082 case R_X86_64_8:
3083 case R_X86_64_16:
3084 case R_X86_64_32:
3085 case R_X86_64_PC64:
3086 case R_X86_64_64:
3087 /* FIXME: The ABI says the linker should make sure the value is
3088 the same when it is zero-extended to 64 bits.  */
3089
3090 direct:
3091 if ((input_section->flags & SEC_ALLOC) == 0)
3092 break;
3093
3094 need_copy_reloc_in_pie = (bfd_link_pie (info)
3095 && h != NULL
3096 && (h->needs_copy
3097 || eh->needs_copy
3098 || (h->root.type
3099 == bfd_link_hash_undefined))
3100 && (X86_PCREL_TYPE_P (r_type)
3101 || X86_SIZE_TYPE_P (r_type)));
3102
3103 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3104 need_copy_reloc_in_pie,
3105 resolved_to_zero, FALSE))
3106 {
3107 Elf_Internal_Rela outrel;
3108 bfd_boolean skip, relocate;
3109 asection *sreloc;
3110
3111 /* When generating a shared object, these relocations
3112 are copied into the output file to be resolved at run
3113 time. */
3114 skip = FALSE;
3115 relocate = FALSE;
3116
3117 outrel.r_offset =
3118 _bfd_elf_section_offset (output_bfd, info, input_section,
3119 rel->r_offset);
3120 if (outrel.r_offset == (bfd_vma) -1)
3121 skip = TRUE;
3122 else if (outrel.r_offset == (bfd_vma) -2)
3123 skip = TRUE, relocate = TRUE;
3124
3125 outrel.r_offset += (input_section->output_section->vma
3126 + input_section->output_offset);
3127
3128 if (skip)
3129 memset (&outrel, 0, sizeof outrel);
3130
3131 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3132 {
3133 outrel.r_info = htab->r_info (h->dynindx, r_type);
3134 outrel.r_addend = rel->r_addend;
3135 }
3136 else
3137 {
3138 /* This symbol is local, or marked to become local.
3139 When relocation overflow check is disabled, we
3140 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3141 if (r_type == htab->pointer_r_type
3142 || (r_type == R_X86_64_32
3143 && info->no_reloc_overflow_check))
3144 {
3145 relocate = TRUE;
3146 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3147 outrel.r_addend = relocation + rel->r_addend;
3148 }
3149 else if (r_type == R_X86_64_64
3150 && !ABI_64_P (output_bfd))
3151 {
3152 relocate = TRUE;
3153 outrel.r_info = htab->r_info (0,
3154 R_X86_64_RELATIVE64);
3155 outrel.r_addend = relocation + rel->r_addend;
3156 /* Check addend overflow. */
3157 if ((outrel.r_addend & 0x80000000)
3158 != (rel->r_addend & 0x80000000))
3159 {
3160 const char *name;
3161 int addend = rel->r_addend;
3162 if (h && h->root.root.string)
3163 name = h->root.root.string;
3164 else
3165 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3166 sym, NULL);
3167 _bfd_error_handler
3168 /* xgettext:c-format */
3169 (_("%pB: addend %s%#x in relocation %s against "
3170 "symbol `%s' at %#" PRIx64
3171 " in section `%pA' is out of range"),
3172 input_bfd, addend < 0 ? "-" : "", addend,
3173 howto->name, name, (uint64_t) rel->r_offset,
3174 input_section);
3175 bfd_set_error (bfd_error_bad_value);
3176 return FALSE;
3177 }
3178 }
3179 else
3180 {
3181 long sindx;
3182
3183 if (bfd_is_abs_section (sec))
3184 sindx = 0;
3185 else if (sec == NULL || sec->owner == NULL)
3186 {
3187 bfd_set_error (bfd_error_bad_value);
3188 return FALSE;
3189 }
3190 else
3191 {
3192 asection *osec;
3193
3194 /* We are turning this relocation into one
3195 against a section symbol. It would be
3196 proper to subtract the symbol's value,
3197 osec->vma, from the emitted reloc addend,
3198 but ld.so expects buggy relocs. */
3199 osec = sec->output_section;
3200 sindx = elf_section_data (osec)->dynindx;
3201 if (sindx == 0)
3202 {
3203 asection *oi = htab->elf.text_index_section;
3204 sindx = elf_section_data (oi)->dynindx;
3205 }
3206 BFD_ASSERT (sindx != 0);
3207 }
3208
3209 outrel.r_info = htab->r_info (sindx, r_type);
3210 outrel.r_addend = relocation + rel->r_addend;
3211 }
3212 }
3213
3214 sreloc = elf_section_data (input_section)->sreloc;
3215
3216 if (sreloc == NULL || sreloc->contents == NULL)
3217 {
3218 r = bfd_reloc_notsupported;
3219 goto check_relocation_error;
3220 }
3221
3222 elf_append_rela (output_bfd, sreloc, &outrel);
3223
3224 /* If this reloc is against an external symbol, we do
3225 not want to fiddle with the addend. Otherwise, we
3226 need to include the symbol value so that it becomes
3227 an addend for the dynamic reloc. */
3228 if (! relocate)
3229 continue;
3230 }
3231
3232 break;
3233
3234 case R_X86_64_TLSGD:
3235 case R_X86_64_GOTPC32_TLSDESC:
3236 case R_X86_64_TLSDESC_CALL:
3237 case R_X86_64_GOTTPOFF:
3238 tls_type = GOT_UNKNOWN;
3239 if (h == NULL && local_got_offsets)
3240 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3241 else if (h != NULL)
3242 tls_type = elf_x86_hash_entry (h)->tls_type;
3243
3244 r_type_tls = r_type;
3245 if (! elf_x86_64_tls_transition (info, input_bfd,
3246 input_section, contents,
3247 symtab_hdr, sym_hashes,
3248 &r_type_tls, tls_type, rel,
3249 relend, h, r_symndx, TRUE))
3250 return FALSE;
3251
3252 if (r_type_tls == R_X86_64_TPOFF32)
3253 {
3254 bfd_vma roff = rel->r_offset;
3255
3256 BFD_ASSERT (! unresolved_reloc);
3257
3258 if (r_type == R_X86_64_TLSGD)
3259 {
3260 /* GD->LE transition. For 64bit, change
3261 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3262 .word 0x6666; rex64; call __tls_get_addr@PLT
3263 or
3264 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3265 .byte 0x66; rex64
3266 call *__tls_get_addr@GOTPCREL(%rip)
3267 which may be converted to
3268 addr32 call __tls_get_addr
3269 into:
3270 movq %fs:0, %rax
3271 leaq foo@tpoff(%rax), %rax
3272 For 32bit, change
3273 leaq foo@tlsgd(%rip), %rdi
3274 .word 0x6666; rex64; call __tls_get_addr@PLT
3275 or
3276 leaq foo@tlsgd(%rip), %rdi
3277 .byte 0x66; rex64
3278 call *__tls_get_addr@GOTPCREL(%rip)
3279 which may be converted to
3280 addr32 call __tls_get_addr
3281 into:
3282 movl %fs:0, %eax
3283 leaq foo@tpoff(%rax), %rax
3284 For largepic, change:
3285 leaq foo@tlsgd(%rip), %rdi
3286 movabsq $__tls_get_addr@pltoff, %rax
3287 addq %r15, %rax
3288 call *%rax
3289 into:
3290 movq %fs:0, %rax
3291 leaq foo@tpoff(%rax), %rax
3292 nopw 0x0(%rax,%rax,1) */
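/* The largepic form is recognized by the 0xb8 byte at roff + 5,
   the second byte of "movabsq $..., %rax" (48 b8 imm64).  Each
   replacement below is exactly as long as the sequence it
   overwrites, and the @tpoff value is stored as the imm32 at
   roff + 8 (+ largepic).  */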
3293 int largepic = 0;
3294 if (ABI_64_P (output_bfd))
3295 {
3296 if (contents[roff + 5] == 0xb8)
3297 {
3298 memcpy (contents + roff - 3,
3299 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3300 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3301 largepic = 1;
3302 }
3303 else
3304 memcpy (contents + roff - 4,
3305 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3306 16);
3307 }
3308 else
3309 memcpy (contents + roff - 3,
3310 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3311 15);
3312 bfd_put_32 (output_bfd,
3313 elf_x86_64_tpoff (info, relocation),
3314 contents + roff + 8 + largepic);
3315 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3316 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3317 rel++;
3318 wrel++;
3319 continue;
3320 }
3321 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3322 {
3323 /* GDesc -> LE transition.
3324 It's originally something like:
3325 leaq x@tlsdesc(%rip), %rax
3326
3327 Change it to:
3328 movl $x@tpoff, %rax. */
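/* The destination register moves from the lea's ModRM reg field
   (extended by REX.R) into the r/m field of the mov-immediate
   form (extended by REX.B), which is why the old R bit is copied
   into the new REX byte's B position below.  */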
3329
3330 unsigned int val, type;
3331
3332 type = bfd_get_8 (input_bfd, contents + roff - 3);
3333 val = bfd_get_8 (input_bfd, contents + roff - 1);
3334 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3335 contents + roff - 3);
3336 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3337 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3338 contents + roff - 1);
3339 bfd_put_32 (output_bfd,
3340 elf_x86_64_tpoff (info, relocation),
3341 contents + roff);
3342 continue;
3343 }
3344 else if (r_type == R_X86_64_TLSDESC_CALL)
3345 {
3346 /* GDesc -> LE transition.
3347 It's originally:
3348 call *(%rax)
3349 Turn it into:
3350 xchg %ax,%ax. */
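/* "call *(%rax)" (ff 10) and "xchg %ax,%ax" (66 90) are both two
   bytes, so the descriptor call is simply replaced by a two-byte
   nop in place.  */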
3351 bfd_put_8 (output_bfd, 0x66, contents + roff);
3352 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3353 continue;
3354 }
3355 else if (r_type == R_X86_64_GOTTPOFF)
3356 {
3357 /* IE->LE transition:
3358 For 64bit, originally it can be one of:
3359 movq foo@gottpoff(%rip), %reg
3360 addq foo@gottpoff(%rip), %reg
3361 We change it into:
3362 movq $foo, %reg
3363 leaq foo(%reg), %reg
3364 addq $foo, %reg.
3365 For 32bit, originally it can be one of:
3366 movq foo@gottpoff(%rip), %reg
3367 addl foo@gottpoff(%rip), %reg
3368 We change it into:
3369 movq $foo, %reg
3370 leal foo(%reg), %reg
3371 addl $foo, %reg. */
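/* Below, VAL is the REX prefix (if any), TYPE the opcode byte
   (0x8b for mov, otherwise add), and REG the ModRM reg field.
   mov becomes the c7 /0 immediate form.  add normally becomes
   lea imm(%reg), %reg, except when the register is %rsp/%r12
   (reg field 4), where the lea encoding would need a SIB byte,
   so the 81 /0 add-immediate form is used instead.  */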
3372
3373 unsigned int val, type, reg;
3374
3375 if (roff >= 3)
3376 val = bfd_get_8 (input_bfd, contents + roff - 3);
3377 else
3378 val = 0;
3379 type = bfd_get_8 (input_bfd, contents + roff - 2);
3380 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3381 reg >>= 3;
3382 if (type == 0x8b)
3383 {
3384 /* movq */
3385 if (val == 0x4c)
3386 bfd_put_8 (output_bfd, 0x49,
3387 contents + roff - 3);
3388 else if (!ABI_64_P (output_bfd) && val == 0x44)
3389 bfd_put_8 (output_bfd, 0x41,
3390 contents + roff - 3);
3391 bfd_put_8 (output_bfd, 0xc7,
3392 contents + roff - 2);
3393 bfd_put_8 (output_bfd, 0xc0 | reg,
3394 contents + roff - 1);
3395 }
3396 else if (reg == 4)
3397 {
3398 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3399 is special */
3400 if (val == 0x4c)
3401 bfd_put_8 (output_bfd, 0x49,
3402 contents + roff - 3);
3403 else if (!ABI_64_P (output_bfd) && val == 0x44)
3404 bfd_put_8 (output_bfd, 0x41,
3405 contents + roff - 3);
3406 bfd_put_8 (output_bfd, 0x81,
3407 contents + roff - 2);
3408 bfd_put_8 (output_bfd, 0xc0 | reg,
3409 contents + roff - 1);
3410 }
3411 else
3412 {
3413 /* addq/addl -> leaq/leal */
3414 if (val == 0x4c)
3415 bfd_put_8 (output_bfd, 0x4d,
3416 contents + roff - 3);
3417 else if (!ABI_64_P (output_bfd) && val == 0x44)
3418 bfd_put_8 (output_bfd, 0x45,
3419 contents + roff - 3);
3420 bfd_put_8 (output_bfd, 0x8d,
3421 contents + roff - 2);
3422 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3423 contents + roff - 1);
3424 }
3425 bfd_put_32 (output_bfd,
3426 elf_x86_64_tpoff (info, relocation),
3427 contents + roff);
3428 continue;
3429 }
3430 else
3431 BFD_ASSERT (FALSE);
3432 }
3433
3434 if (htab->elf.sgot == NULL)
3435 abort ();
3436
3437 if (h != NULL)
3438 {
3439 off = h->got.offset;
3440 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3441 }
3442 else
3443 {
3444 if (local_got_offsets == NULL)
3445 abort ();
3446
3447 off = local_got_offsets[r_symndx];
3448 offplt = local_tlsdesc_gotents[r_symndx];
3449 }
3450
3451 if ((off & 1) != 0)
3452 off &= ~1;
3453 else
3454 {
3455 Elf_Internal_Rela outrel;
3456 int dr_type, indx;
3457 asection *sreloc;
3458
3459 if (htab->elf.srelgot == NULL)
3460 abort ();
3461
3462 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3463
3464 if (GOT_TLS_GDESC_P (tls_type))
3465 {
3466 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3467 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3468 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3469 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3470 + htab->elf.sgotplt->output_offset
3471 + offplt
3472 + htab->sgotplt_jump_table_size);
3473 sreloc = htab->elf.srelplt;
3474 if (indx == 0)
3475 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3476 else
3477 outrel.r_addend = 0;
3478 elf_append_rela (output_bfd, sreloc, &outrel);
3479 }
3480
3481 sreloc = htab->elf.srelgot;
3482
3483 outrel.r_offset = (htab->elf.sgot->output_section->vma
3484 + htab->elf.sgot->output_offset + off);
3485
3486 if (GOT_TLS_GD_P (tls_type))
3487 dr_type = R_X86_64_DTPMOD64;
3488 else if (GOT_TLS_GDESC_P (tls_type))
3489 goto dr_done;
3490 else
3491 dr_type = R_X86_64_TPOFF64;
3492
3493 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3494 outrel.r_addend = 0;
3495 if ((dr_type == R_X86_64_TPOFF64
3496 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3497 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3498 outrel.r_info = htab->r_info (indx, dr_type);
3499
3500 elf_append_rela (output_bfd, sreloc, &outrel);
3501
3502 if (GOT_TLS_GD_P (tls_type))
3503 {
3504 if (indx == 0)
3505 {
3506 BFD_ASSERT (! unresolved_reloc);
3507 bfd_put_64 (output_bfd,
3508 relocation - _bfd_x86_elf_dtpoff_base (info),
3509 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3510 }
3511 else
3512 {
3513 bfd_put_64 (output_bfd, 0,
3514 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3515 outrel.r_info = htab->r_info (indx,
3516 R_X86_64_DTPOFF64);
3517 outrel.r_offset += GOT_ENTRY_SIZE;
3518 elf_append_rela (output_bfd, sreloc,
3519 &outrel);
3520 }
3521 }
3522
3523 dr_done:
3524 if (h != NULL)
3525 h->got.offset |= 1;
3526 else
3527 local_got_offsets[r_symndx] |= 1;
3528 }
3529
3530 if (off >= (bfd_vma) -2
3531 && ! GOT_TLS_GDESC_P (tls_type))
3532 abort ();
3533 if (r_type_tls == r_type)
3534 {
3535 if (r_type == R_X86_64_GOTPC32_TLSDESC
3536 || r_type == R_X86_64_TLSDESC_CALL)
3537 relocation = htab->elf.sgotplt->output_section->vma
3538 + htab->elf.sgotplt->output_offset
3539 + offplt + htab->sgotplt_jump_table_size;
3540 else
3541 relocation = htab->elf.sgot->output_section->vma
3542 + htab->elf.sgot->output_offset + off;
3543 unresolved_reloc = FALSE;
3544 }
3545 else
3546 {
3547 bfd_vma roff = rel->r_offset;
3548
3549 if (r_type == R_X86_64_TLSGD)
3550 {
3551 /* GD->IE transition. For 64bit, change
3552 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3553 .word 0x6666; rex64; call __tls_get_addr@PLT
3554 or
3555 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3556 .byte 0x66; rex64
3557 call *__tls_get_addr@GOTPCREL(%rip)
3558 which may be converted to
3559 addr32 call __tls_get_addr
3560 into:
3561 movq %fs:0, %rax
3562 addq foo@gottpoff(%rip), %rax
3563 For 32bit, change
3564 leaq foo@tlsgd(%rip), %rdi
3565 .word 0x6666; rex64; call __tls_get_addr@PLT
3566 or
3567 leaq foo@tlsgd(%rip), %rdi
3568 .byte 0x66; rex64;
3569 call *__tls_get_addr@GOTPCREL(%rip)
3570 which may be converted to
3571 addr32 call __tls_get_addr
3572 into:
3573 movl %fs:0, %eax
3574 addq foo@gottpoff(%rip), %rax
3575 For largepic, change:
3576 leaq foo@tlsgd(%rip), %rdi
3577 movabsq $__tls_get_addr@pltoff, %rax
3578 addq %r15, %rax
3579 call *%rax
3580 into:
3581 movq %fs:0, %rax
3582 addq foo@gottpoff(%rax), %rax
3583 nopw 0x0(%rax,%rax,1) */
3584 int largepic = 0;
3585 if (ABI_64_P (output_bfd))
3586 {
3587 if (contents[roff + 5] == 0xb8)
3588 {
3589 memcpy (contents + roff - 3,
3590 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3591 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3592 largepic = 1;
3593 }
3594 else
3595 memcpy (contents + roff - 4,
3596 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3597 16);
3598 }
3599 else
3600 memcpy (contents + roff - 3,
3601 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3602 15);
3603
3604 relocation = (htab->elf.sgot->output_section->vma
3605 + htab->elf.sgot->output_offset + off
3606 - roff
3607 - largepic
3608 - input_section->output_section->vma
3609 - input_section->output_offset
3610 - 12);
3611 bfd_put_32 (output_bfd, relocation,
3612 contents + roff + 8 + largepic);
3613 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3614 rel++;
3615 wrel++;
3616 continue;
3617 }
3618 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3619 {
3620 /* GDesc -> IE transition.
3621 It's originally something like:
3622 leaq x@tlsdesc(%rip), %rax
3623
3624 Change it to:
3625 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3626
3627 /* Now modify the instruction as appropriate. To
3628 turn a leaq into a movq in the form we use it, it
3629 suffices to change the second byte from 0x8d to
3630 0x8b. */
3631 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3632
3633 bfd_put_32 (output_bfd,
3634 htab->elf.sgot->output_section->vma
3635 + htab->elf.sgot->output_offset + off
3636 - rel->r_offset
3637 - input_section->output_section->vma
3638 - input_section->output_offset
3639 - 4,
3640 contents + roff);
3641 continue;
3642 }
3643 else if (r_type == R_X86_64_TLSDESC_CALL)
3644 {
3645 /* GDesc -> IE transition.
3646 It's originally:
3647 call *(%rax)
3648
3649 Change it to:
3650 xchg %ax, %ax. */
3651
3652 bfd_put_8 (output_bfd, 0x66, contents + roff);
3653 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3654 continue;
3655 }
3656 else
3657 BFD_ASSERT (FALSE);
3658 }
3659 break;
3660
3661 case R_X86_64_TLSLD:
3662 if (! elf_x86_64_tls_transition (info, input_bfd,
3663 input_section, contents,
3664 symtab_hdr, sym_hashes,
3665 &r_type, GOT_UNKNOWN, rel,
3666 relend, h, r_symndx, TRUE))
3667 return FALSE;
3668
3669 if (r_type != R_X86_64_TLSLD)
3670 {
3671 /* LD->LE transition:
3672 leaq foo@tlsld(%rip), %rdi
3673 call __tls_get_addr@PLT
3674 For 64bit, we change it into:
3675 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3676 For 32bit, we change it into:
3677 nopl 0x0(%rax); movl %fs:0, %eax
3678 Or
3679 leaq foo@tlsld(%rip), %rdi;
3680 call *__tls_get_addr@GOTPCREL(%rip)
3681 which may be converted to
3682 addr32 call __tls_get_addr
3683 For 64bit, we change it into:
3684 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3685 For 32bit, we change it into:
3686 nopw 0x0(%rax); movl %fs:0, %eax
3687 For largepic, change:
3688 leaq foo@tlsld(%rip), %rdi
3689 movabsq $__tls_get_addr@pltoff, %rax
3690 addq %rbx, %rax
3691 call *%rax
3692 into
3693 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3694 		movq %fs:0, %rax */
3695
3696 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3697 if (ABI_64_P (output_bfd))
3698 {
3699 if (contents[rel->r_offset + 5] == 0xb8)
3700 memcpy (contents + rel->r_offset - 3,
3701 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3702 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3703 else if (contents[rel->r_offset + 4] == 0xff
3704 || contents[rel->r_offset + 4] == 0x67)
3705 memcpy (contents + rel->r_offset - 3,
3706 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3707 13);
3708 else
3709 memcpy (contents + rel->r_offset - 3,
3710 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3711 }
3712 else
3713 {
3714 if (contents[rel->r_offset + 4] == 0xff)
3715 memcpy (contents + rel->r_offset - 3,
3716 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3717 13);
3718 else
3719 memcpy (contents + rel->r_offset - 3,
3720 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3721 }
3722 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3723 and R_X86_64_PLTOFF64. */
3724 rel++;
3725 wrel++;
3726 continue;
3727 }
3728
3729 if (htab->elf.sgot == NULL)
3730 abort ();
3731
3732 off = htab->tls_ld_or_ldm_got.offset;
3733 if (off & 1)
3734 off &= ~1;
3735 else
3736 {
3737 Elf_Internal_Rela outrel;
3738
3739 if (htab->elf.srelgot == NULL)
3740 abort ();
3741
3742 outrel.r_offset = (htab->elf.sgot->output_section->vma
3743 + htab->elf.sgot->output_offset + off);
3744
3745 bfd_put_64 (output_bfd, 0,
3746 htab->elf.sgot->contents + off);
3747 bfd_put_64 (output_bfd, 0,
3748 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3749 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3750 outrel.r_addend = 0;
3751 elf_append_rela (output_bfd, htab->elf.srelgot,
3752 &outrel);
3753 htab->tls_ld_or_ldm_got.offset |= 1;
3754 }
3755 relocation = htab->elf.sgot->output_section->vma
3756 + htab->elf.sgot->output_offset + off;
3757 unresolved_reloc = FALSE;
3758 break;
3759
3760 case R_X86_64_DTPOFF32:
3761 if (!bfd_link_executable (info)
3762 || (input_section->flags & SEC_CODE) == 0)
3763 relocation -= _bfd_x86_elf_dtpoff_base (info);
3764 else
3765 relocation = elf_x86_64_tpoff (info, relocation);
3766 break;
3767
3768 case R_X86_64_TPOFF32:
3769 case R_X86_64_TPOFF64:
3770 BFD_ASSERT (bfd_link_executable (info));
3771 relocation = elf_x86_64_tpoff (info, relocation);
3772 break;
3773
3774 case R_X86_64_DTPOFF64:
3775 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3776 relocation -= _bfd_x86_elf_dtpoff_base (info);
3777 break;
3778
3779 default:
3780 break;
3781 }
3782
3783 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3784 because such sections are not SEC_ALLOC and thus ld.so will
3785 not process them. */
3786 if (unresolved_reloc
3787 && !((input_section->flags & SEC_DEBUGGING) != 0
3788 && h->def_dynamic)
3789 && _bfd_elf_section_offset (output_bfd, info, input_section,
3790 rel->r_offset) != (bfd_vma) -1)
3791 {
3792 switch (r_type)
3793 {
3794 case R_X86_64_32S:
3795 sec = h->root.u.def.section;
3796 if ((info->nocopyreloc
3797 || (eh->def_protected
3798 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3799 && !(h->root.u.def.section->flags & SEC_CODE))
3800 return elf_x86_64_need_pic (info, input_bfd, input_section,
3801 h, NULL, NULL, howto);
3802 /* Fall through. */
3803
3804 default:
3805 _bfd_error_handler
3806 /* xgettext:c-format */
3807 (_("%pB(%pA+%#" PRIx64 "): "
3808 "unresolvable %s relocation against symbol `%s'"),
3809 input_bfd,
3810 input_section,
3811 (uint64_t) rel->r_offset,
3812 howto->name,
3813 h->root.root.string);
3814 return FALSE;
3815 }
3816 }
3817
3818 do_relocation:
3819 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3820 contents, rel->r_offset,
3821 relocation, rel->r_addend);
3822
3823 check_relocation_error:
3824 if (r != bfd_reloc_ok)
3825 {
3826 const char *name;
3827
3828 if (h != NULL)
3829 name = h->root.root.string;
3830 else
3831 {
3832 name = bfd_elf_string_from_elf_section (input_bfd,
3833 symtab_hdr->sh_link,
3834 sym->st_name);
3835 if (name == NULL)
3836 return FALSE;
3837 if (*name == '\0')
3838 name = bfd_section_name (input_bfd, sec);
3839 }
3840
3841 if (r == bfd_reloc_overflow)
3842 {
3843 if (converted_reloc)
3844 {
3845 info->callbacks->einfo
3846 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3847 return FALSE;
3848 }
3849 (*info->callbacks->reloc_overflow)
3850 (info, (h ? &h->root : NULL), name, howto->name,
3851 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3852 }
3853 else
3854 {
3855 _bfd_error_handler
3856 /* xgettext:c-format */
3857 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3858 input_bfd, input_section,
3859 (uint64_t) rel->r_offset, name, (int) r);
3860 return FALSE;
3861 }
3862 }
3863
3864 if (wrel != rel)
3865 *wrel = *rel;
3866 }
3867
3868 if (wrel != rel)
3869 {
3870 Elf_Internal_Shdr *rel_hdr;
3871 size_t deleted = rel - wrel;
3872
3873 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3874 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3875 if (rel_hdr->sh_size == 0)
3876 {
3877 /* It is too late to remove an empty reloc section. Leave
3878 one NONE reloc.
3879 ??? What is wrong with an empty section??? */
3880 rel_hdr->sh_size = rel_hdr->sh_entsize;
3881 deleted -= 1;
3882 }
3883 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3884 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3885 input_section->reloc_count -= deleted;
3886 }
3887
3888 return TRUE;
3889 }
3890
3891 /* Finish up dynamic symbol handling. We set the contents of various
3892 dynamic sections here. */
3893
3894 static bfd_boolean
3895 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3896 struct bfd_link_info *info,
3897 struct elf_link_hash_entry *h,
3898 Elf_Internal_Sym *sym)
3899 {
3900 struct elf_x86_link_hash_table *htab;
3901 bfd_boolean use_plt_second;
3902 struct elf_x86_link_hash_entry *eh;
3903 bfd_boolean local_undefweak;
3904
3905 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3906 if (htab == NULL)
3907 return FALSE;
3908
3909   /* Use the second PLT section only if there is a .plt section. */
3910 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3911
3912 eh = (struct elf_x86_link_hash_entry *) h;
3913 if (eh->no_finish_dynamic_symbol)
3914 abort ();
3915
3916 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3917      resolved undefined weak symbols in an executable so that their
3918 references have value 0 at run-time. */
3919 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3920
3921 if (h->plt.offset != (bfd_vma) -1)
3922 {
3923 bfd_vma plt_index;
3924 bfd_vma got_offset, plt_offset;
3925 Elf_Internal_Rela rela;
3926 bfd_byte *loc;
3927 asection *plt, *gotplt, *relplt, *resolved_plt;
3928 const struct elf_backend_data *bed;
3929 bfd_vma plt_got_pcrel_offset;
3930
3931 /* When building a static executable, use .iplt, .igot.plt and
3932 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3933 if (htab->elf.splt != NULL)
3934 {
3935 plt = htab->elf.splt;
3936 gotplt = htab->elf.sgotplt;
3937 relplt = htab->elf.srelplt;
3938 }
3939 else
3940 {
3941 plt = htab->elf.iplt;
3942 gotplt = htab->elf.igotplt;
3943 relplt = htab->elf.irelplt;
3944 }
3945
3946 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3947
3948 /* Get the index in the procedure linkage table which
3949 corresponds to this symbol. This is the index of this symbol
3950 in all the symbols for which we are making plt entries. The
3951 first entry in the procedure linkage table is reserved.
3952
3953 Get the offset into the .got table of the entry that
3954 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3955 bytes. The first three are reserved for the dynamic linker.
3956
3957 For static executables, we don't reserve anything. */
3958
3959 if (plt == htab->elf.splt)
3960 {
3961 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3962 - htab->plt.has_plt0);
3963 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3964 }
3965 else
3966 {
3967 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3968 got_offset = got_offset * GOT_ENTRY_SIZE;
3969 }
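	  /* For example, with 8-byte GOT entries and the usual 16-byte lazy
	     PLT entries, the first real PLT entry (at offset 16, after PLT0)
	     yields got_offset (16 / 16 - 1 + 3) * 8 == 24, i.e. the slot
	     right after the three reserved GOT entries.  */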
3970
3971 /* Fill in the entry in the procedure linkage table. */
3972 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3973 htab->plt.plt_entry_size);
3974 if (use_plt_second)
3975 {
3976 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3977 htab->non_lazy_plt->plt_entry,
3978 htab->non_lazy_plt->plt_entry_size);
3979
3980 resolved_plt = htab->plt_second;
3981 plt_offset = eh->plt_second.offset;
3982 }
3983 else
3984 {
3985 resolved_plt = plt;
3986 plt_offset = h->plt.offset;
3987 }
3988
3989 /* Insert the relocation positions of the plt section. */
3990
3991       /* Put the offset to the GOT entry into the PC-relative instruction
3992 	 referring to it, subtracting the size of that instruction.  */
3993 plt_got_pcrel_offset = (gotplt->output_section->vma
3994 + gotplt->output_offset
3995 + got_offset
3996 - resolved_plt->output_section->vma
3997 - resolved_plt->output_offset
3998 - plt_offset
3999 - htab->plt.plt_got_insn_size);
4000
4001 /* Check PC-relative offset overflow in PLT entry. */
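	  /* plt_got_pcrel_offset is a 64-bit bfd_vma; adding 0x80000000 and
	     comparing against 0xffffffff checks that the value fits in the
	     signed 32-bit displacement field of the PLT instruction.  */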
4002 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4003 /* xgettext:c-format */
4004 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4005 output_bfd, h->root.root.string);
4006
4007 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4008 (resolved_plt->contents + plt_offset
4009 + htab->plt.plt_got_offset));
4010
4011       /* Fill in the entry in the global offset table; initially this
4012 	 points to the second part of the PLT entry.  Leave the entry
4013 	 as zero for an undefined weak symbol in a PIE; no PLT relocation
4014 	 is emitted against an undefined weak symbol in a PIE.  */
4015 if (!local_undefweak)
4016 {
4017 if (htab->plt.has_plt0)
4018 bfd_put_64 (output_bfd, (plt->output_section->vma
4019 + plt->output_offset
4020 + h->plt.offset
4021 + htab->lazy_plt->plt_lazy_offset),
4022 gotplt->contents + got_offset);
4023
4024 /* Fill in the entry in the .rela.plt section. */
4025 rela.r_offset = (gotplt->output_section->vma
4026 + gotplt->output_offset
4027 + got_offset);
4028 if (PLT_LOCAL_IFUNC_P (info, h))
4029 {
4030 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4031 h->root.root.string,
4032 h->root.u.def.section->owner);
4033
4034 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4035 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4036 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4037 rela.r_addend = (h->root.u.def.value
4038 + h->root.u.def.section->output_section->vma
4039 + h->root.u.def.section->output_offset);
4040 /* R_X86_64_IRELATIVE comes last. */
4041 plt_index = htab->next_irelative_index--;
4042 }
4043 else
4044 {
4045 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4046 rela.r_addend = 0;
4047 plt_index = htab->next_jump_slot_index++;
4048 }
4049
4050 	  /* Don't fill in the second and third slots of the PLT entry for
4051 	     static executables or when there is no PLT0.  */
4052 if (plt == htab->elf.splt && htab->plt.has_plt0)
4053 {
4054 bfd_vma plt0_offset
4055 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4056
4057 /* Put relocation index. */
4058 bfd_put_32 (output_bfd, plt_index,
4059 (plt->contents + h->plt.offset
4060 + htab->lazy_plt->plt_reloc_offset));
4061
4062 	      /* Put the offset for jmp .PLT0 and check for overflow.  We don't
4063 		 check the relocation index for overflow since the branch
4064 		 displacement will overflow first.  */
4065 if (plt0_offset > 0x80000000)
4066 /* xgettext:c-format */
4067 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4068 output_bfd, h->root.root.string);
4069 bfd_put_32 (output_bfd, - plt0_offset,
4070 (plt->contents + h->plt.offset
4071 + htab->lazy_plt->plt_plt_offset));
4072 }
4073
4074 bed = get_elf_backend_data (output_bfd);
4075 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4076 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4077 }
4078 }
4079 else if (eh->plt_got.offset != (bfd_vma) -1)
4080 {
4081 bfd_vma got_offset, plt_offset;
4082 asection *plt, *got;
4083 bfd_boolean got_after_plt;
4084 int32_t got_pcrel_offset;
4085
4086 /* Set the entry in the GOT procedure linkage table. */
4087 plt = htab->plt_got;
4088 got = htab->elf.sgot;
4089 got_offset = h->got.offset;
4090
4091 if (got_offset == (bfd_vma) -1
4092 || (h->type == STT_GNU_IFUNC && h->def_regular)
4093 || plt == NULL
4094 || got == NULL)
4095 abort ();
4096
4097 /* Use the non-lazy PLT entry template for the GOT PLT since they
4098 	 are identical.  */
4099 /* Fill in the entry in the GOT procedure linkage table. */
4100 plt_offset = eh->plt_got.offset;
4101 memcpy (plt->contents + plt_offset,
4102 htab->non_lazy_plt->plt_entry,
4103 htab->non_lazy_plt->plt_entry_size);
4104
4105       /* Put the offset to the GOT entry into the PC-relative instruction
4106 	 referring to it, subtracting the size of that instruction.  */
4107 got_pcrel_offset = (got->output_section->vma
4108 + got->output_offset
4109 + got_offset
4110 - plt->output_section->vma
4111 - plt->output_offset
4112 - plt_offset
4113 - htab->non_lazy_plt->plt_got_insn_size);
4114
4115 /* Check PC-relative offset overflow in GOT PLT entry. */
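	  /* got_pcrel_offset is an int32_t, so a displacement that does not
	     fit in 32 bits wraps around; the wrap is detected by comparing
	     the sign of the truncated value with the known order of the
	     .got and PLT output sections.  */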
4116 got_after_plt = got->output_section->vma > plt->output_section->vma;
4117 if ((got_after_plt && got_pcrel_offset < 0)
4118 || (!got_after_plt && got_pcrel_offset > 0))
4119 /* xgettext:c-format */
4120 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4121 output_bfd, h->root.root.string);
4122
4123 bfd_put_32 (output_bfd, got_pcrel_offset,
4124 (plt->contents + plt_offset
4125 + htab->non_lazy_plt->plt_got_offset));
4126 }
4127
4128 if (!local_undefweak
4129 && !h->def_regular
4130 && (h->plt.offset != (bfd_vma) -1
4131 || eh->plt_got.offset != (bfd_vma) -1))
4132 {
4133 /* Mark the symbol as undefined, rather than as defined in
4134 the .plt section. Leave the value if there were any
4135 relocations where pointer equality matters (this is a clue
4136 for the dynamic linker, to make function pointer
4137 comparisons work between an application and shared
4138 library), otherwise set it to zero. If a function is only
4139 called from a binary, there is no need to slow down
4140 shared libraries because of that. */
4141 sym->st_shndx = SHN_UNDEF;
4142 if (!h->pointer_equality_needed)
4143 sym->st_value = 0;
4144 }
4145
4146   /* Don't generate a dynamic GOT relocation against an undefined weak
4147      symbol in an executable.  */
4148 if (h->got.offset != (bfd_vma) -1
4149 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4150 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4151 && !local_undefweak)
4152 {
4153 Elf_Internal_Rela rela;
4154 asection *relgot = htab->elf.srelgot;
4155
4156 /* This symbol has an entry in the global offset table. Set it
4157 up. */
4158 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4159 abort ();
4160
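	  /* The least significant bit of h->got.offset records whether the
	     GOT entry has already been initialized by relocate_section;
	     mask it off to recover the real offset.  */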
4161 rela.r_offset = (htab->elf.sgot->output_section->vma
4162 + htab->elf.sgot->output_offset
4163 + (h->got.offset &~ (bfd_vma) 1));
4164
4165 /* If this is a static link, or it is a -Bsymbolic link and the
4166 symbol is defined locally or was forced to be local because
4167 of a version file, we just want to emit a RELATIVE reloc.
4168 The entry in the global offset table will already have been
4169 initialized in the relocate_section function. */
4170 if (h->def_regular
4171 && h->type == STT_GNU_IFUNC)
4172 {
4173 if (h->plt.offset == (bfd_vma) -1)
4174 {
4175 /* STT_GNU_IFUNC is referenced without PLT. */
4176 if (htab->elf.splt == NULL)
4177 {
4178 		  /* Use the .rel[a].iplt section to store .got relocations
4179 		     in a static executable.  */
4180 relgot = htab->elf.irelplt;
4181 }
4182 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4183 {
4184 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4185 h->root.root.string,
4186 h->root.u.def.section->owner);
4187
4188 rela.r_info = htab->r_info (0,
4189 R_X86_64_IRELATIVE);
4190 rela.r_addend = (h->root.u.def.value
4191 + h->root.u.def.section->output_section->vma
4192 + h->root.u.def.section->output_offset);
4193 }
4194 else
4195 goto do_glob_dat;
4196 }
4197 else if (bfd_link_pic (info))
4198 {
4199 /* Generate R_X86_64_GLOB_DAT. */
4200 goto do_glob_dat;
4201 }
4202 else
4203 {
4204 asection *plt;
4205 bfd_vma plt_offset;
4206
4207 if (!h->pointer_equality_needed)
4208 abort ();
4209
4210 	      /* For a non-shared object, we can't use .got.plt, which
4211 		 contains the real function address, if we need pointer
4212 		 equality.  We load the GOT entry with the PLT entry instead.  */
4213 if (htab->plt_second != NULL)
4214 {
4215 plt = htab->plt_second;
4216 plt_offset = eh->plt_second.offset;
4217 }
4218 else
4219 {
4220 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4221 plt_offset = h->plt.offset;
4222 }
4223 bfd_put_64 (output_bfd, (plt->output_section->vma
4224 + plt->output_offset
4225 + plt_offset),
4226 htab->elf.sgot->contents + h->got.offset);
4227 return TRUE;
4228 }
4229 }
4230 else if (bfd_link_pic (info)
4231 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4232 {
4233 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4234 return FALSE;
4235 BFD_ASSERT((h->got.offset & 1) != 0);
4236 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4237 rela.r_addend = (h->root.u.def.value
4238 + h->root.u.def.section->output_section->vma
4239 + h->root.u.def.section->output_offset);
4240 }
4241 else
4242 {
4243 BFD_ASSERT((h->got.offset & 1) == 0);
4244 do_glob_dat:
4245 bfd_put_64 (output_bfd, (bfd_vma) 0,
4246 htab->elf.sgot->contents + h->got.offset);
4247 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4248 rela.r_addend = 0;
4249 }
4250
4251 elf_append_rela (output_bfd, relgot, &rela);
4252 }
4253
4254 if (h->needs_copy)
4255 {
4256 Elf_Internal_Rela rela;
4257 asection *s;
4258
4259 /* This symbol needs a copy reloc. Set it up. */
4260 VERIFY_COPY_RELOC (h, htab)
4261
4262 rela.r_offset = (h->root.u.def.value
4263 + h->root.u.def.section->output_section->vma
4264 + h->root.u.def.section->output_offset);
4265 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4266 rela.r_addend = 0;
4267 if (h->root.u.def.section == htab->elf.sdynrelro)
4268 s = htab->elf.sreldynrelro;
4269 else
4270 s = htab->elf.srelbss;
4271 elf_append_rela (output_bfd, s, &rela);
4272 }
4273
4274 return TRUE;
4275 }
4276
4277 /* Finish up local dynamic symbol handling. We set the contents of
4278 various dynamic sections here. */
4279
4280 static bfd_boolean
4281 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4282 {
4283 struct elf_link_hash_entry *h
4284 = (struct elf_link_hash_entry *) *slot;
4285 struct bfd_link_info *info
4286 = (struct bfd_link_info *) inf;
4287
4288 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4289 info, h, NULL);
4290 }
4291
4292 /* Finish up undefined weak symbol handling in a PIE.  Fill in its PLT
4293    entry here since an undefined weak symbol may not be dynamic, in which
4294    case elf_x86_64_finish_dynamic_symbol may not be called for it.  */
4295
4296 static bfd_boolean
4297 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4298 void *inf)
4299 {
4300 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4301 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4302
4303 if (h->root.type != bfd_link_hash_undefweak
4304 || h->dynindx != -1)
4305 return TRUE;
4306
4307 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4308 info, h, NULL);
4309 }
4310
4311 /* Used to decide how to sort relocs in an optimal manner for the
4312 dynamic linker, before writing them out. */
4313
4314 static enum elf_reloc_type_class
4315 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4316 const asection *rel_sec ATTRIBUTE_UNUSED,
4317 const Elf_Internal_Rela *rela)
4318 {
4319 bfd *abfd = info->output_bfd;
4320 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4321 struct elf_x86_link_hash_table *htab
4322 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4323
4324 if (htab->elf.dynsym != NULL
4325 && htab->elf.dynsym->contents != NULL)
4326 {
4327 /* Check relocation against STT_GNU_IFUNC symbol if there are
4328 dynamic symbols. */
4329 unsigned long r_symndx = htab->r_sym (rela->r_info);
4330 if (r_symndx != STN_UNDEF)
4331 {
4332 Elf_Internal_Sym sym;
4333 if (!bed->s->swap_symbol_in (abfd,
4334 (htab->elf.dynsym->contents
4335 + r_symndx * bed->s->sizeof_sym),
4336 0, &sym))
4337 abort ();
4338
4339 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4340 return reloc_class_ifunc;
4341 }
4342 }
4343
4344 switch ((int) ELF32_R_TYPE (rela->r_info))
4345 {
4346 case R_X86_64_IRELATIVE:
4347 return reloc_class_ifunc;
4348 case R_X86_64_RELATIVE:
4349 case R_X86_64_RELATIVE64:
4350 return reloc_class_relative;
4351 case R_X86_64_JUMP_SLOT:
4352 return reloc_class_plt;
4353 case R_X86_64_COPY:
4354 return reloc_class_copy;
4355 default:
4356 return reloc_class_normal;
4357 }
4358 }
4359
4360 /* Finish up the dynamic sections. */
4361
4362 static bfd_boolean
4363 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4364 struct bfd_link_info *info)
4365 {
4366 struct elf_x86_link_hash_table *htab;
4367
4368 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4369 if (htab == NULL)
4370 return FALSE;
4371
4372 if (! htab->elf.dynamic_sections_created)
4373 return TRUE;
4374
4375 if (htab->elf.splt && htab->elf.splt->size > 0)
4376 {
4377 elf_section_data (htab->elf.splt->output_section)
4378 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4379
4380 if (htab->plt.has_plt0)
4381 {
4382 /* Fill in the special first entry in the procedure linkage
4383 table. */
4384 memcpy (htab->elf.splt->contents,
4385 htab->lazy_plt->plt0_entry,
4386 htab->lazy_plt->plt0_entry_size);
4387 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
4388 	     uses 6 bytes, subtract this value.  */
4389 bfd_put_32 (output_bfd,
4390 (htab->elf.sgotplt->output_section->vma
4391 + htab->elf.sgotplt->output_offset
4392 + 8
4393 - htab->elf.splt->output_section->vma
4394 - htab->elf.splt->output_offset
4395 - 6),
4396 (htab->elf.splt->contents
4397 + htab->lazy_plt->plt0_got1_offset));
4398 /* Add offset for the PC-relative instruction accessing
4399 GOT+16, subtracting the offset to the end of that
4400 instruction. */
4401 bfd_put_32 (output_bfd,
4402 (htab->elf.sgotplt->output_section->vma
4403 + htab->elf.sgotplt->output_offset
4404 + 16
4405 - htab->elf.splt->output_section->vma
4406 - htab->elf.splt->output_offset
4407 - htab->lazy_plt->plt0_got2_insn_end),
4408 (htab->elf.splt->contents
4409 + htab->lazy_plt->plt0_got2_offset));
4410 }
4411
4412 if (htab->tlsdesc_plt)
4413 {
4414 /* The TLSDESC entry in a lazy procedure linkage table. */
4415 static const bfd_byte tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
4416 {
4417 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
4418 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
4419 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
4420 };
4421
4422 bfd_put_64 (output_bfd, (bfd_vma) 0,
4423 htab->elf.sgot->contents + htab->tlsdesc_got);
4424
4425 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4426 tlsdesc_plt_entry, LAZY_PLT_ENTRY_SIZE);
4427
4428 	  /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4429 	     bytes and the instruction uses 6 bytes, subtract these
4430 	     values.  */
4431 bfd_put_32 (output_bfd,
4432 (htab->elf.sgotplt->output_section->vma
4433 + htab->elf.sgotplt->output_offset
4434 + 8
4435 - htab->elf.splt->output_section->vma
4436 - htab->elf.splt->output_offset
4437 - htab->tlsdesc_plt
4438 - 4 - 6),
4439 (htab->elf.splt->contents
4440 + htab->tlsdesc_plt
4441 + 4 + 2));
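	  /* The pushq immediate sits 2 bytes into the 6-byte pushq, which in
	     turn follows the 4-byte ENDBR64, giving the "+ 4 + 2" store
	     offset above.  Likewise the jmpq displacement below is stored at
	     + 4 + 6 + 2 and is relative to the end of that instruction at
	     + 4 + 6 + 6.  */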
4442 /* Add offset for indirect branch via GOT+TDG, where TDG
4443 stands for htab->tlsdesc_got, subtracting the offset
4444 to the end of that instruction. */
4445 bfd_put_32 (output_bfd,
4446 (htab->elf.sgot->output_section->vma
4447 + htab->elf.sgot->output_offset
4448 + htab->tlsdesc_got
4449 - htab->elf.splt->output_section->vma
4450 - htab->elf.splt->output_offset
4451 - htab->tlsdesc_plt
4452 - 4 - 6 - 6),
4453 (htab->elf.splt->contents
4454 + htab->tlsdesc_plt + 4 + 6 + 2));
4455 }
4456 }
4457
4458 /* Fill PLT entries for undefined weak symbols in PIE. */
4459 if (bfd_link_pie (info))
4460 bfd_hash_traverse (&info->hash->table,
4461 elf_x86_64_pie_finish_undefweak_symbol,
4462 info);
4463
4464 return TRUE;
4465 }
4466
4467 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4468 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4469 It has to be done before elf_link_sort_relocs is called so that
4470 dynamic relocations are properly sorted. */
4471
4472 static bfd_boolean
4473 elf_x86_64_output_arch_local_syms
4474 (bfd *output_bfd ATTRIBUTE_UNUSED,
4475 struct bfd_link_info *info,
4476 void *flaginfo ATTRIBUTE_UNUSED,
4477 int (*func) (void *, const char *,
4478 Elf_Internal_Sym *,
4479 asection *,
4480 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4481 {
4482 struct elf_x86_link_hash_table *htab
4483 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4484 if (htab == NULL)
4485 return FALSE;
4486
4487 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4488 htab_traverse (htab->loc_hash_table,
4489 elf_x86_64_finish_local_dynamic_symbol,
4490 info);
4491
4492 return TRUE;
4493 }
4494
4495 /* Forward declaration. */
4496 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4497
4498 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4499 dynamic relocations. */
4500
4501 static long
4502 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4503 long symcount ATTRIBUTE_UNUSED,
4504 asymbol **syms ATTRIBUTE_UNUSED,
4505 long dynsymcount,
4506 asymbol **dynsyms,
4507 asymbol **ret)
4508 {
4509 long count, i, n;
4510 int j;
4511 bfd_byte *plt_contents;
4512 long relsize;
4513 const struct elf_x86_lazy_plt_layout *lazy_plt;
4514 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4515 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4516 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4517 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4518 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4519 asection *plt;
4520 enum elf_x86_plt_type plt_type;
4521 struct elf_x86_plt plts[] =
4522 {
4523 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4524 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4525 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4526 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4527 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4528 };
4529
4530 *ret = NULL;
4531
4532 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4533 return 0;
4534
4535 if (dynsymcount <= 0)
4536 return 0;
4537
4538 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4539 if (relsize <= 0)
4540 return -1;
4541
4542 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4543 {
4544 lazy_plt = &elf_x86_64_lazy_plt;
4545 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4546 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4547 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4548 if (ABI_64_P (abfd))
4549 {
4550 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4551 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4552 }
4553 else
4554 {
4555 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4556 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4557 }
4558 }
4559 else
4560 {
4561 lazy_plt = &elf_x86_64_nacl_plt;
4562 non_lazy_plt = NULL;
4563 lazy_bnd_plt = NULL;
4564 non_lazy_bnd_plt = NULL;
4565 lazy_ibt_plt = NULL;
4566 non_lazy_ibt_plt = NULL;
4567 }
4568
4569 count = 0;
4570 for (j = 0; plts[j].name != NULL; j++)
4571 {
4572 plt = bfd_get_section_by_name (abfd, plts[j].name);
4573 if (plt == NULL || plt->size == 0)
4574 continue;
4575
4576 /* Get the PLT section contents. */
4577 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4578 if (plt_contents == NULL)
4579 break;
4580 if (!bfd_get_section_contents (abfd, (asection *) plt,
4581 plt_contents, 0, plt->size))
4582 {
4583 free (plt_contents);
4584 break;
4585 }
4586
4587 /* Check what kind of PLT it is. */
4588 plt_type = plt_unknown;
4589 if (plts[j].type == plt_unknown
4590 && (plt->size >= (lazy_plt->plt_entry_size
4591 + lazy_plt->plt_entry_size)))
4592 {
4593 /* Match lazy PLT first. Need to check the first two
4594 instructions. */
4595 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4596 lazy_plt->plt0_got1_offset) == 0)
4597 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4598 2) == 0))
4599 plt_type = plt_lazy;
4600 else if (lazy_bnd_plt != NULL
4601 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4602 lazy_bnd_plt->plt0_got1_offset) == 0)
4603 && (memcmp (plt_contents + 6,
4604 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4605 {
4606 plt_type = plt_lazy | plt_second;
4607 	      /* The first entry in the lazy IBT PLT is the same as in the
4608 		 lazy BND PLT.  */
4609 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4610 lazy_ibt_plt->plt_entry,
4611 lazy_ibt_plt->plt_got_offset) == 0))
4612 lazy_plt = lazy_ibt_plt;
4613 else
4614 lazy_plt = lazy_bnd_plt;
4615 }
4616 }
4617
4618 if (non_lazy_plt != NULL
4619 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4620 && plt->size >= non_lazy_plt->plt_entry_size)
4621 {
4622 /* Match non-lazy PLT. */
4623 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4624 non_lazy_plt->plt_got_offset) == 0)
4625 plt_type = plt_non_lazy;
4626 }
4627
4628 if (plt_type == plt_unknown || plt_type == plt_second)
4629 {
4630 if (non_lazy_bnd_plt != NULL
4631 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4632 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4633 non_lazy_bnd_plt->plt_got_offset) == 0))
4634 {
4635 /* Match BND PLT. */
4636 plt_type = plt_second;
4637 non_lazy_plt = non_lazy_bnd_plt;
4638 }
4639 else if (non_lazy_ibt_plt != NULL
4640 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4641 && (memcmp (plt_contents,
4642 non_lazy_ibt_plt->plt_entry,
4643 non_lazy_ibt_plt->plt_got_offset) == 0))
4644 {
4645 /* Match IBT PLT. */
4646 plt_type = plt_second;
4647 non_lazy_plt = non_lazy_ibt_plt;
4648 }
4649 }
4650
4651 if (plt_type == plt_unknown)
4652 {
4653 free (plt_contents);
4654 continue;
4655 }
4656
4657 plts[j].sec = plt;
4658 plts[j].type = plt_type;
4659
4660 if ((plt_type & plt_lazy))
4661 {
4662 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4663 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4664 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4665 /* Skip PLT0 in lazy PLT. */
4666 i = 1;
4667 }
4668 else
4669 {
4670 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4671 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4672 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4673 i = 0;
4674 }
4675
4676 /* Skip lazy PLT when the second PLT is used. */
4677 if (plt_type == (plt_lazy | plt_second))
4678 plts[j].count = 0;
4679 else
4680 {
4681 n = plt->size / plts[j].plt_entry_size;
4682 plts[j].count = n;
4683 count += n - i;
4684 }
4685
4686 plts[j].contents = plt_contents;
4687 }
4688
4689 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4690 (bfd_vma) 0, plts, dynsyms,
4691 ret);
4692 }
4693
4694 /* Handle an x86-64 specific section when reading an object file. This
4695 is called when elfcode.h finds a section with an unknown type. */
4696
4697 static bfd_boolean
4698 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4699 const char *name, int shindex)
4700 {
4701 if (hdr->sh_type != SHT_X86_64_UNWIND)
4702 return FALSE;
4703
4704 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4705 return FALSE;
4706
4707 return TRUE;
4708 }
4709
4710 /* Hook called by the linker routine which adds symbols from an object
4711 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4712 of .bss. */
4713
4714 static bfd_boolean
4715 elf_x86_64_add_symbol_hook (bfd *abfd,
4716 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4717 Elf_Internal_Sym *sym,
4718 const char **namep ATTRIBUTE_UNUSED,
4719 flagword *flagsp ATTRIBUTE_UNUSED,
4720 asection **secp,
4721 bfd_vma *valp)
4722 {
4723 asection *lcomm;
4724
4725 switch (sym->st_shndx)
4726 {
4727 case SHN_X86_64_LCOMMON:
4728 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4729 if (lcomm == NULL)
4730 {
4731 lcomm = bfd_make_section_with_flags (abfd,
4732 "LARGE_COMMON",
4733 (SEC_ALLOC
4734 | SEC_IS_COMMON
4735 | SEC_LINKER_CREATED));
4736 if (lcomm == NULL)
4737 return FALSE;
4738 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4739 }
4740 *secp = lcomm;
4741 *valp = sym->st_size;
4742 return TRUE;
4743 }
4744
4745 return TRUE;
4746 }
4747
4748
4749 /* Given a BFD section, try to locate the corresponding ELF section
4750 index. */
4751
4752 static bfd_boolean
4753 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4754 asection *sec, int *index_return)
4755 {
4756 if (sec == &_bfd_elf_large_com_section)
4757 {
4758 *index_return = SHN_X86_64_LCOMMON;
4759 return TRUE;
4760 }
4761 return FALSE;
4762 }
4763
4764 /* Process a symbol. */
4765
4766 static void
4767 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4768 asymbol *asym)
4769 {
4770 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4771
4772 switch (elfsym->internal_elf_sym.st_shndx)
4773 {
4774 case SHN_X86_64_LCOMMON:
4775 asym->section = &_bfd_elf_large_com_section;
4776 asym->value = elfsym->internal_elf_sym.st_size;
4777 /* Common symbol doesn't set BSF_GLOBAL. */
4778 asym->flags &= ~BSF_GLOBAL;
4779 break;
4780 }
4781 }
4782
4783 static bfd_boolean
4784 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4785 {
4786 return (sym->st_shndx == SHN_COMMON
4787 || sym->st_shndx == SHN_X86_64_LCOMMON);
4788 }
4789
4790 static unsigned int
4791 elf_x86_64_common_section_index (asection *sec)
4792 {
4793 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4794 return SHN_COMMON;
4795 else
4796 return SHN_X86_64_LCOMMON;
4797 }
4798
4799 static asection *
4800 elf_x86_64_common_section (asection *sec)
4801 {
4802 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4803 return bfd_com_section_ptr;
4804 else
4805 return &_bfd_elf_large_com_section;
4806 }
4807
4808 static bfd_boolean
4809 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4810 const Elf_Internal_Sym *sym,
4811 asection **psec,
4812 bfd_boolean newdef,
4813 bfd_boolean olddef,
4814 bfd *oldbfd,
4815 const asection *oldsec)
4816 {
4817 /* A normal common symbol and a large common symbol result in a
4818 normal common symbol. We turn the large common symbol into a
4819 normal one. */
4820 if (!olddef
4821 && h->root.type == bfd_link_hash_common
4822 && !newdef
4823 && bfd_is_com_section (*psec)
4824 && oldsec != *psec)
4825 {
4826 if (sym->st_shndx == SHN_COMMON
4827 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4828 {
4829 h->root.u.c.p->section
4830 = bfd_make_section_old_way (oldbfd, "COMMON");
4831 h->root.u.c.p->section->flags = SEC_ALLOC;
4832 }
4833 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4834 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4835 *psec = bfd_com_section_ptr;
4836 }
4837
4838 return TRUE;
4839 }
4840
4841 static int
4842 elf_x86_64_additional_program_headers (bfd *abfd,
4843 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4844 {
4845 asection *s;
4846 int count = 0;
4847
4848 /* Check to see if we need a large readonly segment. */
4849 s = bfd_get_section_by_name (abfd, ".lrodata");
4850 if (s && (s->flags & SEC_LOAD))
4851 count++;
4852
4853   /* Check to see if we need a large data segment.  Since the .lbss
4854      section is placed right after the .bss section, there should be no
4855      need for a large data segment just because of .lbss.  */
4856 s = bfd_get_section_by_name (abfd, ".ldata");
4857 if (s && (s->flags & SEC_LOAD))
4858 count++;
4859
4860 return count;
4861 }
4862
4863 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4864
4865 static bfd_boolean
4866 elf_x86_64_relocs_compatible (const bfd_target *input,
4867 const bfd_target *output)
4868 {
4869 return ((xvec_get_elf_backend_data (input)->s->elfclass
4870 == xvec_get_elf_backend_data (output)->s->elfclass)
4871 && _bfd_elf_relocs_compatible (input, output));
4872 }
4873
4874 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4875 with GNU properties if found. Otherwise, return NULL. */
4876
4877 static bfd *
4878 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4879 {
4880 struct elf_x86_init_table init_table;
4881
4882 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4883 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4884 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4885 != (int) R_X86_64_GNU_VTINHERIT)
4886 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4887 != (int) R_X86_64_GNU_VTENTRY))
4888 abort ();
4889
4890 /* This is unused for x86-64. */
4891 init_table.plt0_pad_byte = 0x90;
4892
4893 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4894 {
4895 if (info->bndplt)
4896 {
4897 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4898 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4899 }
4900 else
4901 {
4902 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4903 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4904 }
4905
4906 if (ABI_64_P (info->output_bfd))
4907 {
4908 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4909 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4910 }
4911 else
4912 {
4913 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4914 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4915 }
4916 }
4917 else
4918 {
4919 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4920 init_table.non_lazy_plt = NULL;
4921 init_table.lazy_ibt_plt = NULL;
4922 init_table.non_lazy_ibt_plt = NULL;
4923 }
4924
4925 if (ABI_64_P (info->output_bfd))
4926 {
4927 init_table.r_info = elf64_r_info;
4928 init_table.r_sym = elf64_r_sym;
4929 }
4930 else
4931 {
4932 init_table.r_info = elf32_r_info;
4933 init_table.r_sym = elf32_r_sym;
4934 }
4935
4936 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4937 }
4938
4939 static const struct bfd_elf_special_section
4940 elf_x86_64_special_sections[]=
4941 {
4942 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4943 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4944 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4945 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4946 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4947 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4948 { NULL, 0, 0, 0, 0 }
4949 };
4950
4951 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4952 #define TARGET_LITTLE_NAME "elf64-x86-64"
4953 #define ELF_ARCH bfd_arch_i386
4954 #define ELF_TARGET_ID X86_64_ELF_DATA
4955 #define ELF_MACHINE_CODE EM_X86_64
4956 #if DEFAULT_LD_Z_SEPARATE_CODE
4957 # define ELF_MAXPAGESIZE 0x1000
4958 #else
4959 # define ELF_MAXPAGESIZE 0x200000
4960 #endif
4961 #define ELF_MINPAGESIZE 0x1000
4962 #define ELF_COMMONPAGESIZE 0x1000
4963
4964 #define elf_backend_can_gc_sections 1
4965 #define elf_backend_can_refcount 1
4966 #define elf_backend_want_got_plt 1
4967 #define elf_backend_plt_readonly 1
4968 #define elf_backend_want_plt_sym 0
4969 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
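/* Note: the three reserved GOT entries conventionally hold the address of
   _DYNAMIC plus two slots that the dynamic linker fills in at run time
   (its link map pointer and the lazy-resolution entry point).  */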
4970 #define elf_backend_rela_normal 1
4971 #define elf_backend_plt_alignment 4
4972 #define elf_backend_extern_protected_data 1
4973 #define elf_backend_caches_rawsize 1
4974 #define elf_backend_dtrel_excludes_plt 1
4975 #define elf_backend_want_dynrelro 1
4976
4977 #define elf_info_to_howto elf_x86_64_info_to_howto
4978
4979 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4980 #define bfd_elf64_bfd_reloc_name_lookup \
4981 elf_x86_64_reloc_name_lookup
4982
4983 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4984 #define elf_backend_check_relocs elf_x86_64_check_relocs
4985 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4986 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4987 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4988 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4989 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4990 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4991 #ifdef CORE_HEADER
4992 #define elf_backend_write_core_note elf_x86_64_write_core_note
4993 #endif
4994 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4995 #define elf_backend_relocate_section elf_x86_64_relocate_section
4996 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4997 #define elf_backend_object_p elf64_x86_64_elf_object_p
4998 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4999
5000 #define elf_backend_section_from_shdr \
5001 elf_x86_64_section_from_shdr
5002
5003 #define elf_backend_section_from_bfd_section \
5004 elf_x86_64_elf_section_from_bfd_section
5005 #define elf_backend_add_symbol_hook \
5006 elf_x86_64_add_symbol_hook
5007 #define elf_backend_symbol_processing \
5008 elf_x86_64_symbol_processing
5009 #define elf_backend_common_section_index \
5010 elf_x86_64_common_section_index
5011 #define elf_backend_common_section \
5012 elf_x86_64_common_section
5013 #define elf_backend_common_definition \
5014 elf_x86_64_common_definition
5015 #define elf_backend_merge_symbol \
5016 elf_x86_64_merge_symbol
5017 #define elf_backend_special_sections \
5018 elf_x86_64_special_sections
5019 #define elf_backend_additional_program_headers \
5020 elf_x86_64_additional_program_headers
5021 #define elf_backend_setup_gnu_properties \
5022 elf_x86_64_link_setup_gnu_properties
5023 #define elf_backend_hide_symbol \
5024 _bfd_x86_elf_hide_symbol
5025
5026 #include "elf64-target.h"
5027
5028 /* CloudABI support. */
5029
5030 #undef TARGET_LITTLE_SYM
5031 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5032 #undef TARGET_LITTLE_NAME
5033 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5034
5035 #undef ELF_OSABI
5036 #define ELF_OSABI ELFOSABI_CLOUDABI
5037
5038 #undef elf64_bed
5039 #define elf64_bed elf64_x86_64_cloudabi_bed
5040
5041 #include "elf64-target.h"
5042
5043 /* FreeBSD support. */
5044
5045 #undef TARGET_LITTLE_SYM
5046 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5047 #undef TARGET_LITTLE_NAME
5048 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5049
5050 #undef ELF_OSABI
5051 #define ELF_OSABI ELFOSABI_FREEBSD
5052
5053 #undef elf64_bed
5054 #define elf64_bed elf64_x86_64_fbsd_bed
5055
5056 #include "elf64-target.h"
5057
5058 /* Solaris 2 support. */
5059
5060 #undef TARGET_LITTLE_SYM
5061 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5062 #undef TARGET_LITTLE_NAME
5063 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5064
5065 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5066 {
5067 is_solaris /* os */
5068 };
5069
5070 #undef elf_backend_arch_data
5071 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5072
5073 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5074 objects won't be recognized. */
5075 #undef ELF_OSABI
5076
5077 #undef elf64_bed
5078 #define elf64_bed elf64_x86_64_sol2_bed
5079
5080 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5081 boundary. */
5082 #undef elf_backend_static_tls_alignment
5083 #define elf_backend_static_tls_alignment 16
5084
5085 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5086
5087 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5088 File, p.63. */
5089 #undef elf_backend_want_plt_sym
5090 #define elf_backend_want_plt_sym 1
5091
5092 #undef elf_backend_strtab_flags
5093 #define elf_backend_strtab_flags SHF_STRINGS
5094
5095 static bfd_boolean
5096 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5097 bfd *obfd ATTRIBUTE_UNUSED,
5098 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5099 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5100 {
5101 /* PR 19938: FIXME: Need to add code for setting the sh_info
5102 and sh_link fields of Solaris specific section types. */
5103 return FALSE;
5104 }
5105
5106 #undef elf_backend_copy_special_section_fields
5107 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5108
5109 #include "elf64-target.h"
5110
5111 /* Native Client support. */
5112
5113 static bfd_boolean
5114 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5115 {
5116 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5117 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5118 return TRUE;
5119 }
5120
5121 #undef TARGET_LITTLE_SYM
5122 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5123 #undef TARGET_LITTLE_NAME
5124 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5125 #undef elf64_bed
5126 #define elf64_bed elf64_x86_64_nacl_bed
5127
5128 #undef ELF_MAXPAGESIZE
5129 #undef ELF_MINPAGESIZE
5130 #undef ELF_COMMONPAGESIZE
5131 #define ELF_MAXPAGESIZE 0x10000
5132 #define ELF_MINPAGESIZE 0x10000
5133 #define ELF_COMMONPAGESIZE 0x10000
5134
5135 /* Restore defaults. */
5136 #undef ELF_OSABI
5137 #undef elf_backend_static_tls_alignment
5138 #undef elf_backend_want_plt_sym
5139 #define elf_backend_want_plt_sym 0
5140 #undef elf_backend_strtab_flags
5141 #undef elf_backend_copy_special_section_fields
5142
5143 /* NaCl uses substantially different PLT entries for the same effects. */
5144
5145 #undef elf_backend_plt_alignment
5146 #define elf_backend_plt_alignment 5
5147 #define NACL_PLT_ENTRY_SIZE 64
5148 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5149
5150 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5151 {
5152 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5153 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5154 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5155 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5156 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5157
5158 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5159 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5160
5161 /* 32 bytes of nop to pad out to the standard size. */
5162 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5163 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5164 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5165 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5166 0x66, /* excess data16 prefix */
5167 0x90 /* nop */
5168 };
5169
5170 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5171 {
5172 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5173 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5174 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5175 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5176
5177 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5178 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5179 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5180
5181 /* Lazy GOT entries point here (32-byte aligned). */
5182 0x68, /* pushq immediate */
5183 0, 0, 0, 0, /* replaced with index into relocation table. */
5184 0xe9, /* jmp relative */
5185 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5186
5187 /* 22 bytes of nop to pad out to the standard size. */
5188 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5189 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5190 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5191 };
5192
5193 /* .eh_frame covering the .plt section. */
5194
5195 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5196 {
5197 #if (PLT_CIE_LENGTH != 20 \
5198 || PLT_FDE_LENGTH != 36 \
5199 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5200 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5201 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5202 #endif
5203 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5204 0, 0, 0, 0, /* CIE ID */
5205 1, /* CIE version */
5206 'z', 'R', 0, /* Augmentation string */
5207 1, /* Code alignment factor */
5208 0x78, /* Data alignment factor */
5209 16, /* Return address column */
5210 1, /* Augmentation size */
5211 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5212 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5213 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5214 DW_CFA_nop, DW_CFA_nop,
5215
5216 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5217 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5218 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5219 0, 0, 0, 0, /* .plt size goes here */
5220 0, /* Augmentation size */
5221 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5222 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5223 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5224 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5225 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5226 13, /* Block length */
5227 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5228 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5229 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5230 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5231 DW_CFA_nop, DW_CFA_nop
5232 };
5233
5234 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5235 {
5236 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5237 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5238 elf_x86_64_nacl_plt_entry, /* plt_entry */
5239 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5240 2, /* plt0_got1_offset */
5241 9, /* plt0_got2_offset */
5242 13, /* plt0_got2_insn_end */
5243 3, /* plt_got_offset */
5244 33, /* plt_reloc_offset */
5245 38, /* plt_plt_offset */
5246 7, /* plt_got_insn_size */
5247 42, /* plt_plt_insn_end */
5248 32, /* plt_lazy_offset */
5249 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5250 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5251 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5252 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5253 };
5254
5255 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5256 {
5257 is_nacl /* os */
5258 };
5259
5260 #undef elf_backend_arch_data
5261 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5262
5263 #undef elf_backend_object_p
5264 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5265 #undef elf_backend_modify_segment_map
5266 #define elf_backend_modify_segment_map nacl_modify_segment_map
5267 #undef elf_backend_modify_program_headers
5268 #define elf_backend_modify_program_headers nacl_modify_program_headers
5269 #undef elf_backend_final_write_processing
5270 #define elf_backend_final_write_processing nacl_final_write_processing
5271
5272 #include "elf64-target.h"
5273
5274 /* Native Client x32 support. */
5275
5276 static bfd_boolean
5277 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5278 {
5279 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5280 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5281 return TRUE;
5282 }
5283
5284 #undef TARGET_LITTLE_SYM
5285 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5286 #undef TARGET_LITTLE_NAME
5287 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5288 #undef elf32_bed
5289 #define elf32_bed elf32_x86_64_nacl_bed
5290
5291 #define bfd_elf32_bfd_reloc_type_lookup \
5292 elf_x86_64_reloc_type_lookup
5293 #define bfd_elf32_bfd_reloc_name_lookup \
5294 elf_x86_64_reloc_name_lookup
5295 #define bfd_elf32_get_synthetic_symtab \
5296 elf_x86_64_get_synthetic_symtab
5297
5298 #undef elf_backend_object_p
5299 #define elf_backend_object_p \
5300 elf32_x86_64_nacl_elf_object_p
5301
5302 #undef elf_backend_bfd_from_remote_memory
5303 #define elf_backend_bfd_from_remote_memory \
5304 _bfd_elf32_bfd_from_remote_memory
5305
5306 #undef elf_backend_size_info
5307 #define elf_backend_size_info \
5308 _bfd_elf32_size_info
5309
5310 #include "elf32-target.h"
5311
5312 /* Restore defaults. */
5313 #undef elf_backend_object_p
5314 #define elf_backend_object_p elf64_x86_64_elf_object_p
5315 #undef elf_backend_bfd_from_remote_memory
5316 #undef elf_backend_size_info
5317 #undef elf_backend_modify_segment_map
5318 #undef elf_backend_modify_program_headers
5319 #undef elf_backend_final_write_processing
5320
5321 /* Intel L1OM support. */
5322
5323 static bfd_boolean
5324 elf64_l1om_elf_object_p (bfd *abfd)
5325 {
5326 /* Set the right machine number for an L1OM elf64 file. */
5327 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5328 return TRUE;
5329 }
5330
5331 #undef TARGET_LITTLE_SYM
5332 #define TARGET_LITTLE_SYM l1om_elf64_vec
5333 #undef TARGET_LITTLE_NAME
5334 #define TARGET_LITTLE_NAME "elf64-l1om"
5335 #undef ELF_ARCH
5336 #define ELF_ARCH bfd_arch_l1om
5337
5338 #undef ELF_MACHINE_CODE
5339 #define ELF_MACHINE_CODE EM_L1OM
5340
5341 #undef ELF_OSABI
5342
5343 #undef elf64_bed
5344 #define elf64_bed elf64_l1om_bed
5345
5346 #undef elf_backend_object_p
5347 #define elf_backend_object_p elf64_l1om_elf_object_p
5348
5349 /* Restore defaults. */
5350 #undef ELF_MAXPAGESIZE
5351 #undef ELF_MINPAGESIZE
5352 #undef ELF_COMMONPAGESIZE
5353 #if DEFAULT_LD_Z_SEPARATE_CODE
5354 # define ELF_MAXPAGESIZE 0x1000
5355 #else
5356 # define ELF_MAXPAGESIZE 0x200000
5357 #endif
5358 #define ELF_MINPAGESIZE 0x1000
5359 #define ELF_COMMONPAGESIZE 0x1000
5360 #undef elf_backend_plt_alignment
5361 #define elf_backend_plt_alignment 4
5362 #undef elf_backend_arch_data
5363 #define elf_backend_arch_data &elf_x86_64_arch_bed
5364
5365 #include "elf64-target.h"
5366
5367 /* FreeBSD L1OM support. */
5368
5369 #undef TARGET_LITTLE_SYM
5370 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5371 #undef TARGET_LITTLE_NAME
5372 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5373
5374 #undef ELF_OSABI
5375 #define ELF_OSABI ELFOSABI_FREEBSD
5376
5377 #undef elf64_bed
5378 #define elf64_bed elf64_l1om_fbsd_bed
5379
5380 #include "elf64-target.h"
5381
5382 /* Intel K1OM support. */
5383
5384 static bfd_boolean
5385 elf64_k1om_elf_object_p (bfd *abfd)
5386 {
5387   /* Set the right machine number for a K1OM elf64 file.  */
5388 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5389 return TRUE;
5390 }
5391
5392 #undef TARGET_LITTLE_SYM
5393 #define TARGET_LITTLE_SYM k1om_elf64_vec
5394 #undef TARGET_LITTLE_NAME
5395 #define TARGET_LITTLE_NAME "elf64-k1om"
5396 #undef ELF_ARCH
5397 #define ELF_ARCH bfd_arch_k1om
5398
5399 #undef ELF_MACHINE_CODE
5400 #define ELF_MACHINE_CODE EM_K1OM
5401
5402 #undef ELF_OSABI
5403
5404 #undef elf64_bed
5405 #define elf64_bed elf64_k1om_bed
5406
5407 #undef elf_backend_object_p
5408 #define elf_backend_object_p elf64_k1om_elf_object_p
5409
5410 #undef elf_backend_static_tls_alignment
5411
5412 #undef elf_backend_want_plt_sym
5413 #define elf_backend_want_plt_sym 0
5414
5415 #include "elf64-target.h"
5416
5417 /* FreeBSD K1OM support. */
5418
5419 #undef TARGET_LITTLE_SYM
5420 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5421 #undef TARGET_LITTLE_NAME
5422 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5423
5424 #undef ELF_OSABI
5425 #define ELF_OSABI ELFOSABI_FREEBSD
5426
5427 #undef elf64_bed
5428 #define elf64_bed elf64_k1om_fbsd_bed
5429
5430 #include "elf64-target.h"
5431
5432 /* 32bit x86-64 support. */
5433
5434 #undef TARGET_LITTLE_SYM
5435 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5436 #undef TARGET_LITTLE_NAME
5437 #define TARGET_LITTLE_NAME "elf32-x86-64"
5438 #undef elf32_bed
5439
5440 #undef ELF_ARCH
5441 #define ELF_ARCH bfd_arch_i386
5442
5443 #undef ELF_MACHINE_CODE
5444 #define ELF_MACHINE_CODE EM_X86_64
5445
5446 #undef ELF_OSABI
5447
5448 #undef elf_backend_object_p
5449 #define elf_backend_object_p \
5450 elf32_x86_64_elf_object_p
5451
5452 #undef elf_backend_bfd_from_remote_memory
5453 #define elf_backend_bfd_from_remote_memory \
5454 _bfd_elf32_bfd_from_remote_memory
5455
5456 #undef elf_backend_size_info
5457 #define elf_backend_size_info \
5458 _bfd_elf32_size_info
5459
5460 #include "elf32-target.h"