x86: Remove func_pointer_refcount
[deliverable/binutils-gdb.git] / bfd / elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
 39    an identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to
 40    get the relocation type.  We also use ELF_ST_TYPE instead of
 41    ELF64_ST_TYPE since they are the same.  */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
 366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
 379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
 544 /* The first entry in a lazy procedure linkage table looks like this.
 545    See the SVR4 ABI i386 supplement and the x86-64 ABI for how this
 546    works.  */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
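/* A note on how PLT0 is used (informal summary): GOT+8 holds the
   link-map pointer and GOT+16 the entry point of the dynamic linker's
   lazy resolver, both filled in at load time, so PLT0 pushes the
   former and jumps through the latter.  */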
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
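/* Informally: the GOT slot of a lazily bound symbol initially points
   back at the pushq of its own PLT entry (offset 6, the
   plt_lazy_offset below), so the first call pushes the relocation
   index and branches to PLT0; once the resolver has run, the GOT slot
   holds the real target and later calls go there directly.  */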
566
 567 /* The first entry in a lazy procedure linkage table with BND prefix
 568    looks like this.  */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
 577 /* Subsequent entries for branches with BND prefix in a lazy procedure
 578    linkage table look like this.  */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
 587 /* The first entry in the IBT-enabled lazy procedure linkage table is
 588    the same as the lazy PLT with BND prefix so that bound registers are
 589    preserved when control is passed to the dynamic linker.  Subsequent
 590    entries for an IBT-enabled lazy procedure linkage table look like
 591    this.  */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
 613 /* Entries in the non-lazy procedure linkage table look like this.  */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
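/* Informally: in the non-lazy case (e.g. with -z now, or when the
   symbol also needs a regular GOT entry) the GOT slot is fully
   resolved at load time, so the entry is just an indirect jump padded
   to 8 bytes.  */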
621
 622 /* Entries for branches with BND prefix in the non-lazy procedure
 623    linkage table look like this.  */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
 632 /* Entries for branches with IBT enabled in the non-lazy procedure
 633    linkage table look like this.  They have the same size as the lazy
 634    PLT entry.  */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
 644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
 645    linkage table look like this.  They have the same size as the lazy
 646    PLT entry.  */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
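/* Roughly, the DW_CFA_def_cfa_expression above computes
     CFA = %rsp + 8 + 8 * ((%rip & 15) >= 11)
   i.e. it adds the extra 8 bytes once execution within a 16-byte lazy
   PLT entry has passed the pushq (which occupies offsets 6-10).  The
   BND and IBT variants below differ only in the offset compared
   against.  */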
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
825 {
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
843 };
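/* The offset fields above are byte positions within the templates:
   e.g. plt0_got1_offset == 2 is where the GOT+8 displacement of the
   pushq lands in plt0_entry, and plt_got_offset == 2 is where the GOT
   displacement of the jmpq lands in each plt_entry.  */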
844
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
846 {
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
854 };
855
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
857 {
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
878 {
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
886 };
887
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
889 {
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
928 };
929
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
931 {
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
939 };
940
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
942 {
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
953 {
954 is_normal /* os */
955 };
956
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
958
959 static bfd_boolean
960 elf64_x86_64_elf_object_p (bfd *abfd)
961 {
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
964 return TRUE;
965 }
966
967 static bfd_boolean
968 elf32_x86_64_elf_object_p (bfd *abfd)
969 {
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
972 return TRUE;
973 }
974
 975 /* Return TRUE if the TLS access code sequence supports the transition
 976    from R_TYPE.  */
977
978 static bfd_boolean
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
981 asection *sec,
982 bfd_byte *contents,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
985 unsigned int r_type,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
988 {
989 unsigned int val;
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
993 bfd_vma offset;
994 struct elf_x86_link_hash_table *htab;
995 bfd_byte *call;
996 bfd_boolean indirect_call;
997
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1000 switch (r_type)
1001 {
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1005 return FALSE;
1006
1007 if (r_type == R_X86_64_TLSGD)
1008 {
1009 /* Check transition from GD access model. For 64bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1012 or
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .byte 0x66; rex64
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
 1018 can transit to a different access model.  For 32bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1021 or
1022 leaq foo@tlsgd(%rip), %rdi
1023 .byte 0x66; rex64
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
 1027 can transit to a different access model.  For largepic,
1028 we also support:
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
1031 addq $r15, %rax
1032 call *%rax
1033 or
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
1036 addq $rbx, %rax
1037 call *%rax */
1038
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1040
1041 if ((offset + 12) > sec->size)
1042 return FALSE;
1043
1044 call = contents + offset + 4;
1045 if (call[0] != 0x66
1046 || !((call[1] == 0x48
1047 && call[2] == 0xff
1048 && call[3] == 0x15)
1049 || (call[1] == 0x48
1050 && call[2] == 0x67
1051 && call[3] == 0xe8)
1052 || (call[1] == 0x66
1053 && call[2] == 0x48
1054 && call[3] == 0xe8)))
1055 {
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1058 || offset < 3
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1061 || call[11] != 0x01
1062 || call[13] != 0xff
1063 || call[14] != 0xd0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1066 return FALSE;
1067 largepic = TRUE;
1068 }
1069 else if (ABI_64_P (abfd))
1070 {
1071 if (offset < 4
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1073 return FALSE;
1074 }
1075 else
1076 {
1077 if (offset < 3
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1079 return FALSE;
1080 }
1081 indirect_call = call[2] == 0xff;
1082 }
1083 else
1084 {
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1088 or
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
 1093 can transit to a different access model.  For largepic
1094 we also support:
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
1097 addq $r15, %rax
1098 call *%rax
1099 or
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
1102 addq $rbx, %rax
1103 call *%rax */
1104
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1106
1107 if (offset < 3 || (offset + 9) > sec->size)
1108 return FALSE;
1109
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1111 return FALSE;
1112
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1117 {
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1121 || call[11] != 0x01
1122 || call[13] != 0xff
1123 || call[14] != 0xd0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1126 return FALSE;
1127 largepic = TRUE;
1128 }
1129 indirect_call = call[0] == 0xff;
1130 }
1131
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1134 return FALSE;
1135
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1137 if (h == NULL
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1139 return FALSE;
1140 else
1141 {
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1144 if (largepic)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1148 else
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1150 }
1151
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1156 */
1157
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1160 {
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1163 {
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1166 return FALSE;
1167 }
1168 }
1169 else
1170 {
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 if (offset < 2 || (offset + 3) > sec->size)
1175 return FALSE;
1176 }
1177
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1180 return FALSE;
1181
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1184
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1188
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1191 going to be rax. */
1192
1193 if (offset < 3 || (offset + 4) > sec->size)
1194 return FALSE;
1195
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1198 return FALSE;
1199
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1205
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1209 */
1210 if (offset + 2 <= sec->size)
1211 {
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1215 }
1216
1217 return FALSE;
1218
1219 default:
1220 abort ();
1221 }
1222 }
1223
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
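/* In rough terms (a summary of the cases handled below, not a
   specification): when linking an executable, GD/GDesc/IE accesses to
   symbols that resolve locally end up as LE (R_X86_64_TPOFF32), GD and
   GDesc accesses to other symbols become IE (R_X86_64_GOTTPOFF), and
   LD accesses become LE; shared objects keep the original access
   model.  */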
1226
1227 static bfd_boolean
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1238 {
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1242
1243 /* Skip TLS transition for functions. */
1244 if (h != NULL
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1247 return TRUE;
1248
1249 switch (from_type)
1250 {
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1256 {
1257 if (h == NULL)
1258 to_type = R_X86_64_TPOFF32;
1259 else
1260 to_type = R_X86_64_GOTTPOFF;
1261 }
1262
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1266 {
1267 unsigned int new_to_type = to_type;
1268
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1271
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1275 {
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1278 }
1279
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1285 }
1286
1287 break;
1288
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1292 break;
1293
1294 default:
1295 return TRUE;
1296 }
1297
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1300 return TRUE;
1301
1302 /* Check if the transition can be performed. */
1303 if (check
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1307 {
1308 reloc_howto_type *from, *to;
1309 const char *name;
1310
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1313
1314 if (h)
1315 name = h->root.root.string;
1316 else
1317 {
1318 struct elf_x86_link_hash_table *htab;
1319
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1321 if (htab == NULL)
1322 name = "*unknown*";
1323 else
1324 {
1325 Elf_Internal_Sym *isym;
1326
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1328 abfd, r_symndx);
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1330 }
1331 }
1332
1333 _bfd_error_handler
1334 /* xgettext:c-format */
1335 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%A' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
1339 return FALSE;
1340 }
1341
1342 *r_type = to_type;
1343 return TRUE;
1344 }
1345
1346 /* Rename some of the generic section flags to better document how they
1347 are used here. */
1348 #define check_relocs_failed sec_flg0
1349
1350 static bfd_boolean
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1357 {
1358 const char *v = "";
1359 const char *und = "";
1360 const char *pic = "";
1361 const char *object;
1362
1363 const char *name;
1364 if (h)
1365 {
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1368 {
1369 case STV_HIDDEN:
1370 v = _("hidden symbol ");
1371 break;
1372 case STV_INTERNAL:
1373 v = _("internal symbol ");
1374 break;
1375 case STV_PROTECTED:
1376 v = _("protected symbol ");
1377 break;
1378 default:
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1381 else
1382 v = _("symbol ");
1383 pic = _("; recompile with -fPIC");
1384 break;
1385 }
1386
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
1389 }
1390 else
1391 {
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1394 }
1395
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1400 else
1401 object = _("a PDE object");
1402
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1407 object, pic);
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
1410 return FALSE;
1411 }
1412
1413 /* With the local symbol, foo, we convert
1414 mov foo@GOTPCREL(%rip), %reg
1415 to
1416 lea foo(%rip), %reg
1417 and convert
1418 call/jmp *foo@GOTPCREL(%rip)
1419 to
1420 nop call foo/jmp foo nop
1421 When PIC is false, convert
1422 test %reg, foo@GOTPCREL(%rip)
1423 to
1424 test $foo, %reg
1425 and convert
1426 binop foo@GOTPCREL(%rip), %reg
1427 to
1428 binop $foo, %reg
1429 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1430 instructions. */
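/* A byte-level sketch of the first conversion above:
     48 8b 05 xx xx xx xx	mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 xx xx xx xx	lea foo(%rip), %rax
   i.e. only the opcode byte (0x8b -> 0x8d) changes and the relocation
   is rewritten to R_X86_64_PC32; when a PC-relative form isn't
   required, the code below instead produces "mov $foo, %reg" with
   R_X86_64_32 or R_X86_64_32S.  */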
1431
1432 static bfd_boolean
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1434 bfd_byte *contents,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1440 {
1441 struct elf_x86_link_hash_table *htab;
1442 bfd_boolean is_pic;
1443 bfd_boolean no_overflow;
1444 bfd_boolean relocx;
1445 bfd_boolean to_reloc_pc32;
1446 asection *tsec;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1449 unsigned int modrm;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
1453
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1455 return TRUE;
1456
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1459 if (raddend != -4)
1460 return TRUE;
1461
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1464
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1467
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1470
1471 r_symndx = htab->r_sym (irel->r_info);
1472
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1474
 1475 /* Convert mov to lea since this conversion has been supported for a while.  */
1476 if (opcode != 0x8b)
1477 {
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
1481 if (!relocx)
1482 return TRUE;
1483 }
1484
1485 /* We convert only to R_X86_64_PC32:
1486 1. Branch.
1487 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1488 3. no_overflow is true.
1489 4. PIC.
1490 */
1491 to_reloc_pc32 = (opcode == 0xff
1492 || !relocx
1493 || no_overflow
1494 || is_pic);
1495
1496 /* Get the symbol referred to by the reloc. */
1497 if (h == NULL)
1498 {
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1501
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1504 return TRUE;
1505
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1512 else
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1514 }
1515 else
1516 {
 1517 /* An undefined weak symbol is only bound locally in an executable
 1518    and its reference is resolved as 0 without relocation
 1519    overflow.  We can only perform this optimization for
 1520    GOTPCRELX relocations since we need to modify the REX byte.
 1521    It is OK to convert mov with R_X86_64_GOTPCREL to
 1522    R_X86_64_PC32.  */
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1525
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
1530 && !eh->linker_def
1531 && local_ref))
1532 {
1533 if (opcode == 0xff)
1534 {
1535 /* Skip for branch instructions since R_X86_64_PC32
1536 may overflow. */
1537 if (no_overflow)
1538 return TRUE;
1539 }
1540 else if (relocx)
1541 {
1542 /* For non-branch instructions, we can convert to
1543 R_X86_64_32/R_X86_64_32S since we know if there
1544 is a REX byte. */
1545 to_reloc_pc32 = FALSE;
1546 }
1547
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
1551 return TRUE;
1552
1553 goto convert;
1554 }
 1555 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
 1556    ld.so may use its link-time address.  */
1557 else if (h->start_stop
1558 || eh->linker_def
1559 || ((h->def_regular
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1563 && local_ref))
1564 {
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
1568 on __start_SECNAME/__stop_SECNAME which mark section
1569 SECNAME. */
1570 if (h->start_stop
1571 || eh->linker_def
1572 || (h->def_regular
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1578 {
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1580 if (no_overflow)
1581 return TRUE;
1582 goto convert;
1583 }
1584 tsec = h->root.u.def.section;
1585 }
1586 else
1587 return TRUE;
1588 }
1589
1590 /* Don't convert GOTPCREL relocation against large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1593 return TRUE;
1594
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1596 if (no_overflow)
1597 return TRUE;
1598
1599 convert:
1600 if (opcode == 0xff)
1601 {
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1603 unsigned int nop;
1604 unsigned int disp;
1605 bfd_vma nop_offset;
1606
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1608 R_X86_64_PC32. */
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1610 if (modrm == 0x25)
1611 {
1612 /* Convert to "jmp foo nop". */
1613 modrm = 0xe9;
1614 nop = NOP_OPCODE;
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1619 }
1620 else
1621 {
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
1624
1625 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1626 is a nop prefix. */
1627 modrm = 0xe8;
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1631 {
1632 nop = 0x67;
1633 nop_offset = irel->r_offset - 2;
1634 }
1635 else
1636 {
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1639 {
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1644 }
1645 else
1646 nop_offset = irel->r_offset - 2;
1647 }
1648 }
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1652 }
1653 else
1654 {
1655 unsigned int rex;
1656 unsigned int rex_mask = REX_R;
1657
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1660 else
1661 rex = 0;
1662
1663 if (opcode == 0x8b)
1664 {
1665 if (to_reloc_pc32)
1666 {
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1669 opcode = 0x8d;
1670 r_type = R_X86_64_PC32;
1671 }
1672 else
1673 {
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
1676 opcode = 0xc7;
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1681 {
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1685 }
1686 else
1687 {
1688 /* If the REX_W bit in REX byte isn't needed,
1689 use R_X86_64_32 and clear the W bit to avoid
1690 sign-extending imm32 to imm64. */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1693 rex_mask |= REX_W;
1694 goto rewrite_modrm_rex;
1695 }
1696 }
1697 }
1698 else
1699 {
1700 /* R_X86_64_PC32 isn't supported. */
1701 if (to_reloc_pc32)
1702 return TRUE;
1703
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1705 if (opcode == 0x85)
1706 {
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1710 opcode = 0xf7;
1711 }
1712 else
1713 {
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1717 opcode = 0x81;
1718 }
1719
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1723
1724 rewrite_modrm_rex:
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1726
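/* The original instruction addresses memory RIP-relatively, so the
   register is encoded in ModRM.reg (extended by REX.R); the rewritten
   immediate form encodes it in ModRM.r/m (extended by REX.B) instead.
   That is why the register bits were shifted from bits 5:3 down to
   bits 2:0 above, and why REX.R is moved to REX.B below.  */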
1727 if (rex)
1728 {
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1732 }
1733
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1735 irel->r_addend = 0;
1736 }
1737
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
1739 }
1740
1741 *r_type_p = r_type;
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
1744
1745 *converted = TRUE;
1746
1747 return TRUE;
1748 }
1749
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1753
1754 static bfd_boolean
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1756 asection *sec,
1757 const Elf_Internal_Rela *relocs)
1758 {
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1764 asection *sreloc;
1765 bfd_byte *contents;
1766 bfd_boolean converted;
1767
1768 if (bfd_link_relocatable (info))
1769 return TRUE;
1770
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1778 return TRUE;
1779
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1781 if (htab == NULL)
1782 {
1783 sec->check_relocs_failed = 1;
1784 return FALSE;
1785 }
1786
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1788
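/* The raw section contents are needed here because the GOTPCREL load
   conversion and the TLS transition checks below inspect the
   instruction bytes around each relocation.  */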
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1793 {
1794 sec->check_relocs_failed = 1;
1795 return FALSE;
1796 }
1797
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1800
1801 converted = FALSE;
1802
1803 sreloc = NULL;
1804
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1807 {
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1813 const char *name;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1816
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1819
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1821 {
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%B: bad symbol index: %d"),
1824 abfd, r_symndx);
1825 goto error_return;
1826 }
1827
1828 if (r_symndx < symtab_hdr->sh_info)
1829 {
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1832 abfd, r_symndx);
1833 if (isym == NULL)
1834 goto error_return;
1835
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1838 {
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1840 TRUE);
1841 if (h == NULL)
1842 goto error_return;
1843
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1846 isym, NULL);
1847 h->type = STT_GNU_IFUNC;
1848 h->def_regular = 1;
1849 h->ref_regular = 1;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
1852 }
1853 else
1854 h = NULL;
1855 }
1856 else
1857 {
1858 isym = NULL;
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1863 }
1864
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1867 switch (r_type)
1868 {
1869 default:
1870 break;
1871
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1874 case R_X86_64_PC64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1881 {
1882 if (h)
1883 name = h->root.root.string;
1884 else
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1886 NULL);
1887 _bfd_error_handler
1888 /* xgettext:c-format */
1889 (_("%B: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1893 goto error_return;
1894 }
1895 break;
1896 }
1897
1898 if (h != NULL)
1899 {
1900 /* It is referenced by a non-shared object. */
1901 h->ref_regular = 1;
1902 h->root.non_ir_ref_regular = 1;
1903
1904 if (h->type == STT_GNU_IFUNC)
1905 elf_tdata (info->output_bfd)->has_gnu_symbols
1906 |= elf_gnu_symbol_ifunc;
1907 }
1908
1909 converted_reloc = FALSE;
1910 if ((r_type == R_X86_64_GOTPCREL
1911 || r_type == R_X86_64_GOTPCRELX
1912 || r_type == R_X86_64_REX_GOTPCRELX)
1913 && (h == NULL || h->type != STT_GNU_IFUNC))
1914 {
1915 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1916 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1917 irel, h, &converted_reloc,
1918 info))
1919 goto error_return;
1920
1921 if (converted_reloc)
1922 converted = TRUE;
1923 }
1924
1925 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1926 symtab_hdr, sym_hashes,
1927 &r_type, GOT_UNKNOWN,
1928 rel, rel_end, h, r_symndx, FALSE))
1929 goto error_return;
1930
1931 eh = (struct elf_x86_link_hash_entry *) h;
1932 switch (r_type)
1933 {
1934 case R_X86_64_TLSLD:
1935 htab->tls_ld_or_ldm_got.refcount = 1;
1936 goto create_got;
1937
1938 case R_X86_64_TPOFF32:
1939 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1940 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1941 &x86_64_elf_howto_table[r_type]);
1942 if (eh != NULL)
1943 eh->zero_undefweak &= 0x2;
1944 break;
1945
1946 case R_X86_64_GOTTPOFF:
1947 if (!bfd_link_executable (info))
1948 info->flags |= DF_STATIC_TLS;
1949 /* Fall through */
1950
1951 case R_X86_64_GOT32:
1952 case R_X86_64_GOTPCREL:
1953 case R_X86_64_GOTPCRELX:
1954 case R_X86_64_REX_GOTPCRELX:
1955 case R_X86_64_TLSGD:
1956 case R_X86_64_GOT64:
1957 case R_X86_64_GOTPCREL64:
1958 case R_X86_64_GOTPLT64:
1959 case R_X86_64_GOTPC32_TLSDESC:
1960 case R_X86_64_TLSDESC_CALL:
1961 /* This symbol requires a global offset table entry. */
1962 {
1963 int tls_type, old_tls_type;
1964
1965 switch (r_type)
1966 {
1967 default: tls_type = GOT_NORMAL; break;
1968 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1969 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1970 case R_X86_64_GOTPC32_TLSDESC:
1971 case R_X86_64_TLSDESC_CALL:
1972 tls_type = GOT_TLS_GDESC; break;
1973 }
1974
1975 if (h != NULL)
1976 {
1977 h->got.refcount = 1;
1978 old_tls_type = eh->tls_type;
1979 }
1980 else
1981 {
1982 bfd_signed_vma *local_got_refcounts;
1983
1984 /* This is a global offset table entry for a local symbol. */
1985 local_got_refcounts = elf_local_got_refcounts (abfd);
1986 if (local_got_refcounts == NULL)
1987 {
1988 bfd_size_type size;
1989
1990 size = symtab_hdr->sh_info;
1991 size *= sizeof (bfd_signed_vma)
1992 + sizeof (bfd_vma) + sizeof (char);
1993 local_got_refcounts = ((bfd_signed_vma *)
1994 bfd_zalloc (abfd, size));
1995 if (local_got_refcounts == NULL)
1996 goto error_return;
1997 elf_local_got_refcounts (abfd) = local_got_refcounts;
1998 elf_x86_local_tlsdesc_gotent (abfd)
1999 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2000 elf_x86_local_got_tls_type (abfd)
2001 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2002 }
2003 local_got_refcounts[r_symndx] = 1;
2004 old_tls_type
2005 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2006 }
2007
2008 /* If a TLS symbol is accessed using IE at least once,
2009 there is no point in using a dynamic model for it. */
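/* Merge rules: IE wins over GD/GDESC in either order, GD and GDESC
   accesses are combined, and any other mismatch (e.g. a symbol
   accessed both as normal data and as TLS) is an error.  */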
2010 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2011 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2012 || tls_type != GOT_TLS_IE))
2013 {
2014 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2015 tls_type = old_tls_type;
2016 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2017 && GOT_TLS_GD_ANY_P (tls_type))
2018 tls_type |= old_tls_type;
2019 else
2020 {
2021 if (h)
2022 name = h->root.root.string;
2023 else
2024 name = bfd_elf_sym_name (abfd, symtab_hdr,
2025 isym, NULL);
2026 _bfd_error_handler
2027 /* xgettext:c-format */
2028 (_("%B: '%s' accessed both as normal and"
2029 " thread local symbol"),
2030 abfd, name);
2031 bfd_set_error (bfd_error_bad_value);
2032 goto error_return;
2033 }
2034 }
2035
2036 if (old_tls_type != tls_type)
2037 {
2038 if (eh != NULL)
2039 eh->tls_type = tls_type;
2040 else
2041 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2042 }
2043 }
2044 /* Fall through */
2045
2046 case R_X86_64_GOTOFF64:
2047 case R_X86_64_GOTPC32:
2048 case R_X86_64_GOTPC64:
2049 create_got:
2050 if (eh != NULL)
2051 eh->zero_undefweak &= 0x2;
2052 break;
2053
2054 case R_X86_64_PLT32:
2055 case R_X86_64_PLT32_BND:
2056 /* This symbol requires a procedure linkage table entry. We
2057 actually build the entry in adjust_dynamic_symbol,
2058 because this might be a case of linking PIC code which is
2059 never referenced by a dynamic object, in which case we
2060 don't need to generate a procedure linkage table entry
2061 after all. */
2062
2063 /* If this is a local symbol, we resolve it directly without
2064 creating a procedure linkage table entry. */
2065 if (h == NULL)
2066 continue;
2067
2068 eh->zero_undefweak &= 0x2;
2069 h->needs_plt = 1;
2070 h->plt.refcount = 1;
2071 break;
2072
2073 case R_X86_64_PLTOFF64:
2074 /* This tries to form the 'address' of a function relative
2075 to GOT. For global symbols we need a PLT entry. */
2076 if (h != NULL)
2077 {
2078 h->needs_plt = 1;
2079 h->plt.refcount = 1;
2080 }
2081 goto create_got;
2082
2083 case R_X86_64_SIZE32:
2084 case R_X86_64_SIZE64:
2085 size_reloc = TRUE;
2086 goto do_size;
2087
2088 case R_X86_64_32:
2089 if (!ABI_64_P (abfd))
2090 goto pointer;
2091 /* Fall through. */
2092 case R_X86_64_8:
2093 case R_X86_64_16:
2094 case R_X86_64_32S:
2095 /* Check relocation overflow as these relocs may lead to
2096 run-time relocation overflow. Don't error out for
2097 sections we don't care about, such as debug sections or
2098 when relocation overflow check is disabled. */
2099 if (!info->no_reloc_overflow_check
2100 && !converted_reloc
2101 && (bfd_link_pic (info)
2102 || (bfd_link_executable (info)
2103 && h != NULL
2104 && !h->def_regular
2105 && h->def_dynamic
2106 && (sec->flags & SEC_READONLY) == 0)))
2107 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2108 &x86_64_elf_howto_table[r_type]);
2109 /* Fall through. */
2110
2111 case R_X86_64_PC8:
2112 case R_X86_64_PC16:
2113 case R_X86_64_PC32:
2114 case R_X86_64_PC32_BND:
2115 case R_X86_64_PC64:
2116 case R_X86_64_64:
2117 pointer:
2118 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2119 eh->zero_undefweak |= 0x2;
2120 /* We are called after all symbols have been resolved. Only
2121 relocations against STT_GNU_IFUNC symbols must go through
2122 the PLT. */
2123 if (h != NULL
2124 && (bfd_link_executable (info)
2125 || h->type == STT_GNU_IFUNC))
2126 {
2127 bfd_boolean func_pointer_ref = FALSE;
2128
2129 if (r_type == R_X86_64_PC32)
2130 {
2131 /* Since something like ".long foo - ." may be used
2132 as a pointer, make sure that the PLT is used if foo is
2133 a function defined in a shared library. */
2134 if ((sec->flags & SEC_CODE) == 0)
2135 h->pointer_equality_needed = 1;
2136 }
2137 else if (r_type != R_X86_64_PC32_BND
2138 && r_type != R_X86_64_PC64)
2139 {
2140 h->pointer_equality_needed = 1;
2141 /* At run-time, R_X86_64_64 can be resolved for both
2142 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2143 can only be resolved for x32. */
2144 if ((sec->flags & SEC_READONLY) == 0
2145 && (r_type == R_X86_64_64
2146 || (!ABI_64_P (abfd)
2147 && (r_type == R_X86_64_32
2148 || r_type == R_X86_64_32S))))
2149 func_pointer_ref = TRUE;
2150 }
2151
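/* A function pointer taken through a writable data word can be
   resolved by the dynamic linker with an ordinary dynamic relocation,
   so in that case neither a potential copy relocation (non_got_ref)
   nor a PLT entry needs to be forced here.  */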
2152 if (!func_pointer_ref)
2153 {
2154 /* If this reloc is in a read-only section, we might
2155 need a copy reloc. We can't check reliably at this
2156 stage whether the section is read-only, as input
2157 sections have not yet been mapped to output sections.
2158 Tentatively set the flag for now, and correct in
2159 adjust_dynamic_symbol. */
2160 h->non_got_ref = 1;
2161
2162 /* We may need a .plt entry if the symbol is a function
2163 defined in a shared lib or is a function referenced
2164 from the code or read-only section. */
2165 if (!h->def_regular
2166 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2167 h->plt.refcount = 1;
2168 }
2169 }
2170
2171 size_reloc = FALSE;
2172 do_size:
2173 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2174 htab->pointer_r_type))
2175 {
2176 struct elf_dyn_relocs *p;
2177 struct elf_dyn_relocs **head;
2178
2179 /* We must copy these reloc types into the output file.
2180 Create a reloc section in dynobj and make room for
2181 this reloc. */
2182 if (sreloc == NULL)
2183 {
2184 sreloc = _bfd_elf_make_dynamic_reloc_section
2185 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2186 abfd, /*rela?*/ TRUE);
2187
2188 if (sreloc == NULL)
2189 goto error_return;
2190 }
2191
2192 /* If this is a global symbol, we count the number of
2193 relocations we need for this symbol. */
2194 if (h != NULL)
2195 head = &eh->dyn_relocs;
2196 else
2197 {
2198 /* Track dynamic relocs needed for local syms too.
2199 We really need local syms available to do this
2200 easily. Oh well. */
2201 asection *s;
2202 void **vpp;
2203
2204 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2205 abfd, r_symndx);
2206 if (isym == NULL)
2207 goto error_return;
2208
2209 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2210 if (s == NULL)
2211 s = sec;
2212
2213 /* Beware of type punned pointers vs strict aliasing
2214 rules. */
2215 vpp = &(elf_section_data (s)->local_dynrel);
2216 head = (struct elf_dyn_relocs **)vpp;
2217 }
2218
2219 p = *head;
2220 if (p == NULL || p->sec != sec)
2221 {
2222 bfd_size_type amt = sizeof *p;
2223
2224 p = ((struct elf_dyn_relocs *)
2225 bfd_alloc (htab->elf.dynobj, amt));
2226 if (p == NULL)
2227 goto error_return;
2228 p->next = *head;
2229 *head = p;
2230 p->sec = sec;
2231 p->count = 0;
2232 p->pc_count = 0;
2233 }
2234
2235 p->count += 1;
2236 /* Count size relocation as PC-relative relocation. */
2237 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2238 p->pc_count += 1;
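/* These per-section counts are consumed later, when the dynamic
   relocation sections are sized; pc_count allows PC-relative (and
   size) relocations against symbols that resolve locally to be
   dropped at that point.  */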
2239 }
2240 break;
2241
2242 /* This relocation describes the C++ object vtable hierarchy.
2243 Reconstruct it for later use during GC. */
2244 case R_X86_64_GNU_VTINHERIT:
2245 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2246 goto error_return;
2247 break;
2248
2249 /* This relocation describes which C++ vtable entries are actually
2250 used. Record for later use during GC. */
2251 case R_X86_64_GNU_VTENTRY:
2252 BFD_ASSERT (h != NULL);
2253 if (h != NULL
2254 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2255 goto error_return;
2256 break;
2257
2258 default:
2259 break;
2260 }
2261 }
2262
2263 if (elf_section_data (sec)->this_hdr.contents != contents)
2264 {
2265 if (!converted && !info->keep_memory)
2266 free (contents);
2267 else
2268 {
2269 /* Cache the section contents for elf_link_input_bfd if any
2270 load is converted or --no-keep-memory isn't used. */
2271 elf_section_data (sec)->this_hdr.contents = contents;
2272 }
2273 }
2274
2275 /* Cache relocations if any load is converted. */
2276 if (elf_section_data (sec)->relocs != relocs && converted)
2277 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2278
2279 return TRUE;
2280
2281 error_return:
2282 if (elf_section_data (sec)->this_hdr.contents != contents)
2283 free (contents);
2284 sec->check_relocs_failed = 1;
2285 return FALSE;
2286 }
2287
2288 /* Return the relocation value for @tpoff relocation
2289 if STT_TLS virtual address is ADDRESS. */
2290
2291 static bfd_vma
2292 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2293 {
2294 struct elf_link_hash_table *htab = elf_hash_table (info);
2295 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2296 bfd_vma static_tls_size;
2297
2298 /* If tls_sec is NULL, we should have signalled an error already. */
2299 if (htab->tls_sec == NULL)
2300 return 0;
2301
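/* x86-64 uses TLS variant II: the static TLS block ends at the
   thread pointer, so @tpoff values are negative offsets from it.
   The value returned below is
   (ADDRESS - tls_sec->vma) - ALIGN (tls_size, static_tls_alignment).  */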
2302 /* Consider special static TLS alignment requirements. */
2303 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2304 return address - static_tls_size - htab->tls_sec->vma;
2305 }
2306
2307 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
2308 branch? */
2309
2310 static bfd_boolean
2311 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2312 {
2313 /* Opcode Instruction
2314 0xe8 call
2315 0xe9 jump
2316 0x0f 0x8x conditional jump */
2317 return ((offset > 0
2318 && (contents [offset - 1] == 0xe8
2319 || contents [offset - 1] == 0xe9))
2320 || (offset > 1
2321 && contents [offset - 2] == 0x0f
2322 && (contents [offset - 1] & 0xf0) == 0x80));
2323 }
2324
2325 /* Relocate an x86_64 ELF section. */
2326
2327 static bfd_boolean
2328 elf_x86_64_relocate_section (bfd *output_bfd,
2329 struct bfd_link_info *info,
2330 bfd *input_bfd,
2331 asection *input_section,
2332 bfd_byte *contents,
2333 Elf_Internal_Rela *relocs,
2334 Elf_Internal_Sym *local_syms,
2335 asection **local_sections)
2336 {
2337 struct elf_x86_link_hash_table *htab;
2338 Elf_Internal_Shdr *symtab_hdr;
2339 struct elf_link_hash_entry **sym_hashes;
2340 bfd_vma *local_got_offsets;
2341 bfd_vma *local_tlsdesc_gotents;
2342 Elf_Internal_Rela *rel;
2343 Elf_Internal_Rela *wrel;
2344 Elf_Internal_Rela *relend;
2345 unsigned int plt_entry_size;
2346
2347 /* Skip if check_relocs failed. */
2348 if (input_section->check_relocs_failed)
2349 return FALSE;
2350
2351 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2352 if (htab == NULL)
2353 return FALSE;
2354
2355 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2356
2357 plt_entry_size = htab->plt.plt_entry_size;
2358 symtab_hdr = &elf_symtab_hdr (input_bfd);
2359 sym_hashes = elf_sym_hashes (input_bfd);
2360 local_got_offsets = elf_local_got_offsets (input_bfd);
2361 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2362
2363 _bfd_x86_elf_set_tls_module_base (info);
2364
2365 rel = wrel = relocs;
2366 relend = relocs + input_section->reloc_count;
2367 for (; rel < relend; wrel++, rel++)
2368 {
2369 unsigned int r_type, r_type_tls;
2370 reloc_howto_type *howto;
2371 unsigned long r_symndx;
2372 struct elf_link_hash_entry *h;
2373 struct elf_x86_link_hash_entry *eh;
2374 Elf_Internal_Sym *sym;
2375 asection *sec;
2376 bfd_vma off, offplt, plt_offset;
2377 bfd_vma relocation;
2378 bfd_boolean unresolved_reloc;
2379 bfd_reloc_status_type r;
2380 int tls_type;
2381 asection *base_got, *resolved_plt;
2382 bfd_vma st_size;
2383 bfd_boolean resolved_to_zero;
2384 bfd_boolean relative_reloc;
2385 bfd_boolean converted_reloc;
2386 bfd_boolean need_copy_reloc_in_pie;
2387
2388 r_type = ELF32_R_TYPE (rel->r_info);
2389 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2390 || r_type == (int) R_X86_64_GNU_VTENTRY)
2391 {
2392 if (wrel != rel)
2393 *wrel = *rel;
2394 continue;
2395 }
2396
2397 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2398 r_type &= ~R_X86_64_converted_reloc_bit;
2399
2400 if (r_type >= (int) R_X86_64_standard)
2401 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2402
2403 if (r_type != (int) R_X86_64_32
2404 || ABI_64_P (output_bfd))
2405 howto = x86_64_elf_howto_table + r_type;
2406 else
2407 howto = (x86_64_elf_howto_table
2408 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2409 r_symndx = htab->r_sym (rel->r_info);
2410 h = NULL;
2411 sym = NULL;
2412 sec = NULL;
2413 unresolved_reloc = FALSE;
2414 if (r_symndx < symtab_hdr->sh_info)
2415 {
2416 sym = local_syms + r_symndx;
2417 sec = local_sections[r_symndx];
2418
2419 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2420 &sec, rel);
2421 st_size = sym->st_size;
2422
2423 /* Relocate against local STT_GNU_IFUNC symbol. */
2424 if (!bfd_link_relocatable (info)
2425 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2426 {
2427 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2428 rel, FALSE);
2429 if (h == NULL)
2430 abort ();
2431
2432 /* Set STT_GNU_IFUNC symbol value. */
2433 h->root.u.def.value = sym->st_value;
2434 h->root.u.def.section = sec;
2435 }
2436 }
2437 else
2438 {
2439 bfd_boolean warned ATTRIBUTE_UNUSED;
2440 bfd_boolean ignored ATTRIBUTE_UNUSED;
2441
2442 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2443 r_symndx, symtab_hdr, sym_hashes,
2444 h, sec, relocation,
2445 unresolved_reloc, warned, ignored);
2446 st_size = h->size;
2447 }
2448
2449 if (sec != NULL && discarded_section (sec))
2450 {
2451 _bfd_clear_contents (howto, input_bfd, input_section,
2452 contents + rel->r_offset);
2453 wrel->r_offset = rel->r_offset;
2454 wrel->r_info = 0;
2455 wrel->r_addend = 0;
2456
2457 /* For ld -r, remove relocations in debug sections against
2458 sections defined in discarded sections. Not done for
2459 eh_frame, as its editing code expects relocations to be present. */
2460 if (bfd_link_relocatable (info)
2461 && (input_section->flags & SEC_DEBUGGING))
2462 wrel--;
2463
2464 continue;
2465 }
2466
2467 if (bfd_link_relocatable (info))
2468 {
2469 if (wrel != rel)
2470 *wrel = *rel;
2471 continue;
2472 }
2473
2474 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2475 {
2476 if (r_type == R_X86_64_64)
2477 {
2478 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2479 zero-extend it to 64bit if addend is zero. */
2480 r_type = R_X86_64_32;
2481 memset (contents + rel->r_offset + 4, 0, 4);
2482 }
2483 else if (r_type == R_X86_64_SIZE64)
2484 {
2485 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2486 zero-extend it to 64bit if addend is zero. */
2487 r_type = R_X86_64_SIZE32;
2488 memset (contents + rel->r_offset + 4, 0, 4);
2489 }
2490 }
2491
2492 eh = (struct elf_x86_link_hash_entry *) h;
2493
2494 /* Since STT_GNU_IFUNC symbols must go through the PLT, we handle
2495 them here if they are defined in a non-shared object. */
2496 if (h != NULL
2497 && h->type == STT_GNU_IFUNC
2498 && h->def_regular)
2499 {
2500 bfd_vma plt_index;
2501 const char *name;
2502
2503 if ((input_section->flags & SEC_ALLOC) == 0)
2504 {
2505 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2506 sections because such sections are not SEC_ALLOC and
2507 thus ld.so will not process them. */
2508 if ((input_section->flags & SEC_DEBUGGING) != 0)
2509 continue;
2510 abort ();
2511 }
2512
2513 switch (r_type)
2514 {
2515 default:
2516 break;
2517
2518 case R_X86_64_GOTPCREL:
2519 case R_X86_64_GOTPCRELX:
2520 case R_X86_64_REX_GOTPCRELX:
2521 case R_X86_64_GOTPCREL64:
2522 base_got = htab->elf.sgot;
2523 off = h->got.offset;
2524
2525 if (base_got == NULL)
2526 abort ();
2527
2528 if (off == (bfd_vma) -1)
2529 {
2530 /* We can't use h->got.offset here to save state, or
2531 even just remember the offset, as finish_dynamic_symbol
2532 would use that as offset into .got. */
2533
2534 if (h->plt.offset == (bfd_vma) -1)
2535 abort ();
2536
2537 if (htab->elf.splt != NULL)
2538 {
2539 plt_index = (h->plt.offset / plt_entry_size
2540 - htab->plt.has_plt0);
2541 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2542 base_got = htab->elf.sgotplt;
2543 }
2544 else
2545 {
2546 plt_index = h->plt.offset / plt_entry_size;
2547 off = plt_index * GOT_ENTRY_SIZE;
2548 base_got = htab->elf.igotplt;
2549 }
2550
2551 if (h->dynindx == -1
2552 || h->forced_local
2553 || info->symbolic)
2554 {
2555 /* This references the local definition. We must
2556 initialize this entry in the global offset table.
2557 Since the offset must always be a multiple of 8,
2558 we use the least significant bit to record
2559 whether we have initialized it already.
2560
2561 When doing a dynamic link, we create a .rela.got
2562 relocation entry to initialize the value. This
2563 is done in the finish_dynamic_symbol routine. */
2564 if ((off & 1) != 0)
2565 off &= ~1;
2566 else
2567 {
2568 bfd_put_64 (output_bfd, relocation,
2569 base_got->contents + off);
2570 /* Note that this is harmless for the GOTPLT64
2571 case, as -1 | 1 still is -1. */
2572 h->got.offset |= 1;
2573 }
2574 }
2575 }
2576
2577 relocation = (base_got->output_section->vma
2578 + base_got->output_offset + off);
2579
2580 goto do_relocation;
2581 }
2582
2583 if (h->plt.offset == (bfd_vma) -1)
2584 {
2585 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2586 if (r_type == htab->pointer_r_type
2587 && (input_section->flags & SEC_CODE) == 0)
2588 goto do_ifunc_pointer;
2589 goto bad_ifunc_reloc;
2590 }
2591
2592 /* STT_GNU_IFUNC symbol must go through PLT. */
2593 if (htab->elf.splt != NULL)
2594 {
2595 if (htab->plt_second != NULL)
2596 {
2597 resolved_plt = htab->plt_second;
2598 plt_offset = eh->plt_second.offset;
2599 }
2600 else
2601 {
2602 resolved_plt = htab->elf.splt;
2603 plt_offset = h->plt.offset;
2604 }
2605 }
2606 else
2607 {
2608 resolved_plt = htab->elf.iplt;
2609 plt_offset = h->plt.offset;
2610 }
2611
2612 relocation = (resolved_plt->output_section->vma
2613 + resolved_plt->output_offset + plt_offset);
2614
2615 switch (r_type)
2616 {
2617 default:
2618 bad_ifunc_reloc:
2619 if (h->root.root.string)
2620 name = h->root.root.string;
2621 else
2622 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2623 NULL);
2624 _bfd_error_handler
2625 /* xgettext:c-format */
2626 (_("%B: relocation %s against STT_GNU_IFUNC "
2627 "symbol `%s' isn't supported"), input_bfd,
2628 howto->name, name);
2629 bfd_set_error (bfd_error_bad_value);
2630 return FALSE;
2631
2632 case R_X86_64_32S:
2633 if (bfd_link_pic (info))
2634 abort ();
2635 goto do_relocation;
2636
2637 case R_X86_64_32:
2638 if (ABI_64_P (output_bfd))
2639 goto do_relocation;
2640 /* FALLTHROUGH */
2641 case R_X86_64_64:
2642 do_ifunc_pointer:
2643 if (rel->r_addend != 0)
2644 {
2645 if (h->root.root.string)
2646 name = h->root.root.string;
2647 else
2648 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2649 sym, NULL);
2650 _bfd_error_handler
2651 /* xgettext:c-format */
2652 (_("%B: relocation %s against STT_GNU_IFUNC "
2653 "symbol `%s' has non-zero addend: %Ld"),
2654 input_bfd, howto->name, name, rel->r_addend);
2655 bfd_set_error (bfd_error_bad_value);
2656 return FALSE;
2657 }
2658
2659 /* Generate a dynamic relocation only when there is a
2660 non-GOT reference in a shared object or there is no
2661 PLT. */
2662 if ((bfd_link_pic (info) && h->non_got_ref)
2663 || h->plt.offset == (bfd_vma) -1)
2664 {
2665 Elf_Internal_Rela outrel;
2666 asection *sreloc;
2667
2668 /* Need a dynamic relocation to get the real function
2669 address. */
2670 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2671 info,
2672 input_section,
2673 rel->r_offset);
2674 if (outrel.r_offset == (bfd_vma) -1
2675 || outrel.r_offset == (bfd_vma) -2)
2676 abort ();
2677
2678 outrel.r_offset += (input_section->output_section->vma
2679 + input_section->output_offset);
2680
2681 if (POINTER_LOCAL_IFUNC_P (info, h))
2682 {
2683 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2684 h->root.root.string,
2685 h->root.u.def.section->owner);
2686
2687 /* This symbol is resolved locally. */
2688 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2689 outrel.r_addend = (h->root.u.def.value
2690 + h->root.u.def.section->output_section->vma
2691 + h->root.u.def.section->output_offset);
2692 }
2693 else
2694 {
2695 outrel.r_info = htab->r_info (h->dynindx, r_type);
2696 outrel.r_addend = 0;
2697 }
2698
2699 /* Dynamic relocations are stored in
2700 1. .rela.ifunc section in PIC object.
2701 2. .rela.got section in dynamic executable.
2702 3. .rela.iplt section in static executable. */
2703 if (bfd_link_pic (info))
2704 sreloc = htab->elf.irelifunc;
2705 else if (htab->elf.splt != NULL)
2706 sreloc = htab->elf.srelgot;
2707 else
2708 sreloc = htab->elf.irelplt;
2709 elf_append_rela (output_bfd, sreloc, &outrel);
2710
2711 /* If this reloc is against an external symbol, we
2712 do not want to fiddle with the addend. Otherwise,
2713 we need to include the symbol value so that it
2714 becomes an addend for the dynamic reloc. For an
2715 internal symbol, the addend has already been updated. */
2716 continue;
2717 }
2718 /* FALLTHROUGH */
2719 case R_X86_64_PC32:
2720 case R_X86_64_PC32_BND:
2721 case R_X86_64_PC64:
2722 case R_X86_64_PLT32:
2723 case R_X86_64_PLT32_BND:
2724 goto do_relocation;
2725 }
2726 }
2727
2728 resolved_to_zero = (eh != NULL
2729 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2730
2731 /* When generating a shared object, the relocations handled here are
2732 copied into the output file to be resolved at run time. */
2733 switch (r_type)
2734 {
2735 case R_X86_64_GOT32:
2736 case R_X86_64_GOT64:
2737 /* Relocation is to the entry for this symbol in the global
2738 offset table. */
2739 case R_X86_64_GOTPCREL:
2740 case R_X86_64_GOTPCRELX:
2741 case R_X86_64_REX_GOTPCRELX:
2742 case R_X86_64_GOTPCREL64:
2743 /* Use global offset table entry as symbol value. */
2744 case R_X86_64_GOTPLT64:
2745 /* This is obsolete and treated the same as GOT64. */
2746 base_got = htab->elf.sgot;
2747
2748 if (htab->elf.sgot == NULL)
2749 abort ();
2750
2751 relative_reloc = FALSE;
2752 if (h != NULL)
2753 {
2754 off = h->got.offset;
2755 if (h->needs_plt
2756 && h->plt.offset != (bfd_vma)-1
2757 && off == (bfd_vma)-1)
2758 {
2759 /* We can't use h->got.offset here to save
2760 state, or even just remember the offset, as
2761 finish_dynamic_symbol would use that as offset into
2762 .got. */
2763 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2764 - htab->plt.has_plt0);
2765 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2766 base_got = htab->elf.sgotplt;
2767 }
2768
2769 if (RESOLVED_LOCALLY_P (info, h, htab))
2770 {
2771 /* We must initialize this entry in the global offset
2772 table. Since the offset must always be a multiple
2773 of 8, we use the least significant bit to record
2774 whether we have initialized it already.
2775
2776 When doing a dynamic link, we create a .rela.got
2777 relocation entry to initialize the value. This is
2778 done in the finish_dynamic_symbol routine. */
2779 if ((off & 1) != 0)
2780 off &= ~1;
2781 else
2782 {
2783 bfd_put_64 (output_bfd, relocation,
2784 base_got->contents + off);
2785 /* Note that this is harmless for the GOTPLT64 case,
2786 as -1 | 1 still is -1. */
2787 h->got.offset |= 1;
2788
2789 if (GENERATE_RELATIVE_RELOC_P (info, h))
2790 {
2791 /* If this symbol isn't dynamic in PIC,
2792 generate R_X86_64_RELATIVE here. */
2793 eh->no_finish_dynamic_symbol = 1;
2794 relative_reloc = TRUE;
2795 }
2796 }
2797 }
2798 else
2799 unresolved_reloc = FALSE;
2800 }
2801 else
2802 {
2803 if (local_got_offsets == NULL)
2804 abort ();
2805
2806 off = local_got_offsets[r_symndx];
2807
2808 /* The offset must always be a multiple of 8. We use
2809 the least significant bit to record whether we have
2810 already generated the necessary reloc. */
2811 if ((off & 1) != 0)
2812 off &= ~1;
2813 else
2814 {
2815 bfd_put_64 (output_bfd, relocation,
2816 base_got->contents + off);
2817 local_got_offsets[r_symndx] |= 1;
2818
2819 if (bfd_link_pic (info))
2820 relative_reloc = TRUE;
2821 }
2822 }
2823
2824 if (relative_reloc)
2825 {
2826 asection *s;
2827 Elf_Internal_Rela outrel;
2828
2829 /* We need to generate a R_X86_64_RELATIVE reloc
2830 for the dynamic linker. */
2831 s = htab->elf.srelgot;
2832 if (s == NULL)
2833 abort ();
2834
2835 outrel.r_offset = (base_got->output_section->vma
2836 + base_got->output_offset
2837 + off);
2838 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2839 outrel.r_addend = relocation;
2840 elf_append_rela (output_bfd, s, &outrel);
2841 }
2842
2843 if (off >= (bfd_vma) -2)
2844 abort ();
2845
2846 relocation = base_got->output_section->vma
2847 + base_got->output_offset + off;
2848 if (r_type != R_X86_64_GOTPCREL
2849 && r_type != R_X86_64_GOTPCRELX
2850 && r_type != R_X86_64_REX_GOTPCRELX
2851 && r_type != R_X86_64_GOTPCREL64)
2852 relocation -= htab->elf.sgotplt->output_section->vma
2853 - htab->elf.sgotplt->output_offset;
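/* GOT32/GOT64/GOTPLT64 values are offsets from _GLOBAL_OFFSET_TABLE_,
   which points at the start of .got.plt, so for the non-PC-relative
   forms the GOT entry address computed above is rebased against
   .got.plt here.  */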
2854
2855 break;
2856
2857 case R_X86_64_GOTOFF64:
2858 /* Relocation is relative to the start of the global offset
2859 table. */
2860
2861 /* Check that this isn't a protected function or data
2862 symbol for a shared library, since it may not be local when
2863 used as a function address or with a copy relocation. We also
2864 need to make sure that the symbol is referenced locally. */
2865 if (bfd_link_pic (info) && h)
2866 {
2867 if (!h->def_regular)
2868 {
2869 const char *v;
2870
2871 switch (ELF_ST_VISIBILITY (h->other))
2872 {
2873 case STV_HIDDEN:
2874 v = _("hidden symbol");
2875 break;
2876 case STV_INTERNAL:
2877 v = _("internal symbol");
2878 break;
2879 case STV_PROTECTED:
2880 v = _("protected symbol");
2881 break;
2882 default:
2883 v = _("symbol");
2884 break;
2885 }
2886
2887 _bfd_error_handler
2888 /* xgettext:c-format */
2889 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2890 " `%s' can not be used when making a shared object"),
2891 input_bfd, v, h->root.root.string);
2892 bfd_set_error (bfd_error_bad_value);
2893 return FALSE;
2894 }
2895 else if (!bfd_link_executable (info)
2896 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2897 && (h->type == STT_FUNC
2898 || h->type == STT_OBJECT)
2899 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2900 {
2901 _bfd_error_handler
2902 /* xgettext:c-format */
2903 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2904 " `%s' can not be used when making a shared object"),
2905 input_bfd,
2906 h->type == STT_FUNC ? "function" : "data",
2907 h->root.root.string);
2908 bfd_set_error (bfd_error_bad_value);
2909 return FALSE;
2910 }
2911 }
2912
2913 /* Note that sgot is not involved in this
2914 calculation. We always want the start of .got.plt. If we
2915 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2916 permitted by the ABI, we might have to change this
2917 calculation. */
2918 relocation -= htab->elf.sgotplt->output_section->vma
2919 + htab->elf.sgotplt->output_offset;
2920 break;
2921
2922 case R_X86_64_GOTPC32:
2923 case R_X86_64_GOTPC64:
2924 /* Use global offset table as symbol value. */
2925 relocation = htab->elf.sgotplt->output_section->vma
2926 + htab->elf.sgotplt->output_offset;
2927 unresolved_reloc = FALSE;
2928 break;
2929
2930 case R_X86_64_PLTOFF64:
2931 /* Relocation is PLT entry relative to GOT. For local
2932 symbols it's the symbol itself relative to GOT. */
2933 if (h != NULL
2934 /* See PLT32 handling. */
2935 && (h->plt.offset != (bfd_vma) -1
2936 || eh->plt_got.offset != (bfd_vma) -1)
2937 && htab->elf.splt != NULL)
2938 {
2939 if (eh->plt_got.offset != (bfd_vma) -1)
2940 {
2941 /* Use the GOT PLT. */
2942 resolved_plt = htab->plt_got;
2943 plt_offset = eh->plt_got.offset;
2944 }
2945 else if (htab->plt_second != NULL)
2946 {
2947 resolved_plt = htab->plt_second;
2948 plt_offset = eh->plt_second.offset;
2949 }
2950 else
2951 {
2952 resolved_plt = htab->elf.splt;
2953 plt_offset = h->plt.offset;
2954 }
2955
2956 relocation = (resolved_plt->output_section->vma
2957 + resolved_plt->output_offset
2958 + plt_offset);
2959 unresolved_reloc = FALSE;
2960 }
2961
2962 relocation -= htab->elf.sgotplt->output_section->vma
2963 + htab->elf.sgotplt->output_offset;
2964 break;
2965
2966 case R_X86_64_PLT32:
2967 case R_X86_64_PLT32_BND:
2968 /* Relocation is to the entry for this symbol in the
2969 procedure linkage table. */
2970
2971 /* Resolve a PLT32 reloc against a local symbol directly,
2972 without using the procedure linkage table. */
2973 if (h == NULL)
2974 break;
2975
2976 if ((h->plt.offset == (bfd_vma) -1
2977 && eh->plt_got.offset == (bfd_vma) -1)
2978 || htab->elf.splt == NULL)
2979 {
2980 /* We didn't make a PLT entry for this symbol. This
2981 happens when statically linking PIC code, or when
2982 using -Bsymbolic. */
2983 break;
2984 }
2985
2986 if (h->plt.offset != (bfd_vma) -1)
2987 {
2988 if (htab->plt_second != NULL)
2989 {
2990 resolved_plt = htab->plt_second;
2991 plt_offset = eh->plt_second.offset;
2992 }
2993 else
2994 {
2995 resolved_plt = htab->elf.splt;
2996 plt_offset = h->plt.offset;
2997 }
2998 }
2999 else
3000 {
3001 /* Use the GOT PLT. */
3002 resolved_plt = htab->plt_got;
3003 plt_offset = eh->plt_got.offset;
3004 }
3005
3006 relocation = (resolved_plt->output_section->vma
3007 + resolved_plt->output_offset
3008 + plt_offset);
3009 unresolved_reloc = FALSE;
3010 break;
3011
3012 case R_X86_64_SIZE32:
3013 case R_X86_64_SIZE64:
3014 /* Set to symbol size. */
3015 relocation = st_size;
3016 goto direct;
3017
3018 case R_X86_64_PC8:
3019 case R_X86_64_PC16:
3020 case R_X86_64_PC32:
3021 case R_X86_64_PC32_BND:
3022 /* Don't complain about -fPIC if the symbol is undefined when
3023 building an executable, unless it is an unresolved weak symbol
3024 or -z nocopyreloc is used. */
3025 if ((input_section->flags & SEC_ALLOC) != 0
3026 && (input_section->flags & SEC_READONLY) != 0
3027 && h != NULL
3028 && ((bfd_link_executable (info)
3029 && ((h->root.type == bfd_link_hash_undefweak
3030 && !resolved_to_zero)
3031 || ((info->nocopyreloc
3032 || (eh->def_protected
3033 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3034 && h->def_dynamic
3035 && !(h->root.u.def.section->flags & SEC_CODE))))
3036 || bfd_link_dll (info)))
3037 {
3038 bfd_boolean fail = FALSE;
3039 bfd_boolean branch
3040 = ((r_type == R_X86_64_PC32
3041 || r_type == R_X86_64_PC32_BND)
3042 && is_32bit_relative_branch (contents, rel->r_offset));
3043
3044 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3045 {
3046 /* Symbol is referenced locally. Make sure it is
3047 defined locally or for a branch. */
3048 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3049 && !branch);
3050 }
3051 else if (!(bfd_link_pie (info)
3052 && (h->needs_copy || eh->needs_copy)))
3053 {
3054 /* Symbol doesn't need copy reloc and isn't referenced
3055 locally. We only allow branch to symbol with
3056 non-default visibility. */
3057 fail = (!branch
3058 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3059 }
3060
3061 if (fail)
3062 return elf_x86_64_need_pic (info, input_bfd, input_section,
3063 h, NULL, NULL, howto);
3064 }
3065 /* Fall through. */
3066
3067 case R_X86_64_8:
3068 case R_X86_64_16:
3069 case R_X86_64_32:
3070 case R_X86_64_PC64:
3071 case R_X86_64_64:
3072 /* FIXME: The ABI says the linker should make sure the value is
3073 the same when it's zero-extended to 64 bits. */
3074
3075 direct:
3076 if ((input_section->flags & SEC_ALLOC) == 0)
3077 break;
3078
3079 need_copy_reloc_in_pie = (bfd_link_pie (info)
3080 && h != NULL
3081 && (h->needs_copy
3082 || eh->needs_copy
3083 || (h->root.type
3084 == bfd_link_hash_undefined))
3085 && (X86_PCREL_TYPE_P (r_type)
3086 || X86_SIZE_TYPE_P (r_type)));
3087
3088 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3089 need_copy_reloc_in_pie,
3090 resolved_to_zero, FALSE))
3091 {
3092 Elf_Internal_Rela outrel;
3093 bfd_boolean skip, relocate;
3094 asection *sreloc;
3095
3096 /* When generating a shared object, these relocations
3097 are copied into the output file to be resolved at run
3098 time. */
3099 skip = FALSE;
3100 relocate = FALSE;
3101
3102 outrel.r_offset =
3103 _bfd_elf_section_offset (output_bfd, info, input_section,
3104 rel->r_offset);
3105 if (outrel.r_offset == (bfd_vma) -1)
3106 skip = TRUE;
3107 else if (outrel.r_offset == (bfd_vma) -2)
3108 skip = TRUE, relocate = TRUE;
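/* An offset of -1 means the location has been discarded, so no
   dynamic relocation is emitted at all; -2 means the containing
   section's contents are rewritten by the linker (e.g. .eh_frame),
   so the relocation is still applied to the contents but is not
   emitted dynamically.  */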
3109
3110 outrel.r_offset += (input_section->output_section->vma
3111 + input_section->output_offset);
3112
3113 if (skip)
3114 memset (&outrel, 0, sizeof outrel);
3115
3116 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3117 {
3118 outrel.r_info = htab->r_info (h->dynindx, r_type);
3119 outrel.r_addend = rel->r_addend;
3120 }
3121 else
3122 {
3123 /* This symbol is local, or marked to become local.
3124 When relocation overflow check is disabled, we
3125 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3126 if (r_type == htab->pointer_r_type
3127 || (r_type == R_X86_64_32
3128 && info->no_reloc_overflow_check))
3129 {
3130 relocate = TRUE;
3131 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3132 outrel.r_addend = relocation + rel->r_addend;
3133 }
3134 else if (r_type == R_X86_64_64
3135 && !ABI_64_P (output_bfd))
3136 {
3137 relocate = TRUE;
3138 outrel.r_info = htab->r_info (0,
3139 R_X86_64_RELATIVE64);
3140 outrel.r_addend = relocation + rel->r_addend;
3141 /* Check addend overflow. */
3142 if ((outrel.r_addend & 0x80000000)
3143 != (rel->r_addend & 0x80000000))
3144 {
3145 const char *name;
3146 int addend = rel->r_addend;
3147 if (h && h->root.root.string)
3148 name = h->root.root.string;
3149 else
3150 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3151 sym, NULL);
3152 _bfd_error_handler
3153 /* xgettext:c-format */
3154 (_("%B: addend %s%#x in relocation %s against "
3155 "symbol `%s' at %#Lx in section `%A' is "
3156 "out of range"),
3157 input_bfd, addend < 0 ? "-" : "", addend,
3158 howto->name, name, rel->r_offset, input_section);
3159 bfd_set_error (bfd_error_bad_value);
3160 return FALSE;
3161 }
3162 }
3163 else
3164 {
3165 long sindx;
3166
3167 if (bfd_is_abs_section (sec))
3168 sindx = 0;
3169 else if (sec == NULL || sec->owner == NULL)
3170 {
3171 bfd_set_error (bfd_error_bad_value);
3172 return FALSE;
3173 }
3174 else
3175 {
3176 asection *osec;
3177
3178 /* We are turning this relocation into one
3179 against a section symbol. It would be
3180 proper to subtract the symbol's value,
3181 osec->vma, from the emitted reloc addend,
3182 but ld.so expects buggy relocs. */
3183 osec = sec->output_section;
3184 sindx = elf_section_data (osec)->dynindx;
3185 if (sindx == 0)
3186 {
3187 asection *oi = htab->elf.text_index_section;
3188 sindx = elf_section_data (oi)->dynindx;
3189 }
3190 BFD_ASSERT (sindx != 0);
3191 }
3192
3193 outrel.r_info = htab->r_info (sindx, r_type);
3194 outrel.r_addend = relocation + rel->r_addend;
3195 }
3196 }
3197
3198 sreloc = elf_section_data (input_section)->sreloc;
3199
3200 if (sreloc == NULL || sreloc->contents == NULL)
3201 {
3202 r = bfd_reloc_notsupported;
3203 goto check_relocation_error;
3204 }
3205
3206 elf_append_rela (output_bfd, sreloc, &outrel);
3207
3208 /* If this reloc is against an external symbol, we do
3209 not want to fiddle with the addend. Otherwise, we
3210 need to include the symbol value so that it becomes
3211 an addend for the dynamic reloc. */
3212 if (! relocate)
3213 continue;
3214 }
3215
3216 break;
3217
3218 case R_X86_64_TLSGD:
3219 case R_X86_64_GOTPC32_TLSDESC:
3220 case R_X86_64_TLSDESC_CALL:
3221 case R_X86_64_GOTTPOFF:
3222 tls_type = GOT_UNKNOWN;
3223 if (h == NULL && local_got_offsets)
3224 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3225 else if (h != NULL)
3226 tls_type = elf_x86_hash_entry (h)->tls_type;
3227
3228 r_type_tls = r_type;
3229 if (! elf_x86_64_tls_transition (info, input_bfd,
3230 input_section, contents,
3231 symtab_hdr, sym_hashes,
3232 &r_type_tls, tls_type, rel,
3233 relend, h, r_symndx, TRUE))
3234 return FALSE;
3235
3236 if (r_type_tls == R_X86_64_TPOFF32)
3237 {
3238 bfd_vma roff = rel->r_offset;
3239
3240 BFD_ASSERT (! unresolved_reloc);
3241
3242 if (r_type == R_X86_64_TLSGD)
3243 {
3244 /* GD->LE transition. For 64bit, change
3245 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3246 .word 0x6666; rex64; call __tls_get_addr@PLT
3247 or
3248 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3249 .byte 0x66; rex64
3250 call *__tls_get_addr@GOTPCREL(%rip)
3251 which may be converted to
3252 addr32 call __tls_get_addr
3253 into:
3254 movq %fs:0, %rax
3255 leaq foo@tpoff(%rax), %rax
3256 For 32bit, change
3257 leaq foo@tlsgd(%rip), %rdi
3258 .word 0x6666; rex64; call __tls_get_addr@PLT
3259 or
3260 leaq foo@tlsgd(%rip), %rdi
3261 .byte 0x66; rex64
3262 call *__tls_get_addr@GOTPCREL(%rip)
3263 which may be converted to
3264 addr32 call __tls_get_addr
3265 into:
3266 movl %fs:0, %eax
3267 leaq foo@tpoff(%rax), %rax
3268 For largepic, change:
3269 leaq foo@tlsgd(%rip), %rdi
3270 movabsq $__tls_get_addr@pltoff, %rax
3271 addq %r15, %rax
3272 call *%rax
3273 into:
3274 movq %fs:0, %rax
3275 leaq foo@tpoff(%rax), %rax
3276 nopw 0x0(%rax,%rax,1) */
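/* In the byte sequences below, 64 48 8b 04 25 imm32 encodes
   "movq %fs:imm32, %rax" (imm32 is 0 here), 48 8d 80 disp32 encodes
   "leaq disp32(%rax), %rax", and 66 0f 1f 44 00 00 is the 6-byte
   "nopw 0x0(%rax,%rax,1)" padding used in the largepic case; the
   disp32 is filled in with the @tpoff value just below.  */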
3277 int largepic = 0;
3278 if (ABI_64_P (output_bfd))
3279 {
3280 if (contents[roff + 5] == 0xb8)
3281 {
3282 memcpy (contents + roff - 3,
3283 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3284 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3285 largepic = 1;
3286 }
3287 else
3288 memcpy (contents + roff - 4,
3289 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3290 16);
3291 }
3292 else
3293 memcpy (contents + roff - 3,
3294 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3295 15);
3296 bfd_put_32 (output_bfd,
3297 elf_x86_64_tpoff (info, relocation),
3298 contents + roff + 8 + largepic);
3299 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3300 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3301 rel++;
3302 wrel++;
3303 continue;
3304 }
3305 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3306 {
3307 /* GDesc -> LE transition.
3308 It's originally something like:
3309 leaq x@tlsdesc(%rip), %rax
3310
3311 Change it to:
3312 movl $x@tpoff, %rax. */
3313
3314 unsigned int val, type;
3315
3316 type = bfd_get_8 (input_bfd, contents + roff - 3);
3317 val = bfd_get_8 (input_bfd, contents + roff - 1);
3318 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3319 contents + roff - 3);
3320 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3321 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3322 contents + roff - 1);
3323 bfd_put_32 (output_bfd,
3324 elf_x86_64_tpoff (info, relocation),
3325 contents + roff);
3326 continue;
3327 }
3328 else if (r_type == R_X86_64_TLSDESC_CALL)
3329 {
3330 /* GDesc -> LE transition.
3331 It's originally:
3332 call *(%rax)
3333 Turn it into:
3334 xchg %ax,%ax. */
3335 bfd_put_8 (output_bfd, 0x66, contents + roff);
3336 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3337 continue;
3338 }
3339 else if (r_type == R_X86_64_GOTTPOFF)
3340 {
3341 /* IE->LE transition:
3342 For 64bit, originally it can be one of:
3343 movq foo@gottpoff(%rip), %reg
3344 addq foo@gottpoff(%rip), %reg
3345 We change it into:
3346 movq $foo, %reg
3347 leaq foo(%reg), %reg
3348 addq $foo, %reg.
3349 For 32bit, originally it can be one of:
3350 movq foo@gottpoff(%rip), %reg
3351 addl foo@gottpoff(%rip), %reg
3352 We change it into:
3353 movq $foo, %reg
3354 leal foo(%reg), %reg
3355 addl $foo, %reg. */
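/* The rewrites below keep the instruction length: mov (8b) becomes
   mov-imm (c7 /0); add becomes leaq foo(%reg), %reg (8d), except when
   the register is %rsp/%r12 (which would require a SIB byte), where
   add-imm (81 /0) is used instead.  The REX prefix is adjusted so a
   register previously encoded via REX.R is re-encoded via REX.B (both
   bits for the lea form), e.g. 4c becomes 49 for mov-imm and 4d for
   lea.  */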
3356
3357 unsigned int val, type, reg;
3358
3359 if (roff >= 3)
3360 val = bfd_get_8 (input_bfd, contents + roff - 3);
3361 else
3362 val = 0;
3363 type = bfd_get_8 (input_bfd, contents + roff - 2);
3364 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3365 reg >>= 3;
3366 if (type == 0x8b)
3367 {
3368 /* movq */
3369 if (val == 0x4c)
3370 bfd_put_8 (output_bfd, 0x49,
3371 contents + roff - 3);
3372 else if (!ABI_64_P (output_bfd) && val == 0x44)
3373 bfd_put_8 (output_bfd, 0x41,
3374 contents + roff - 3);
3375 bfd_put_8 (output_bfd, 0xc7,
3376 contents + roff - 2);
3377 bfd_put_8 (output_bfd, 0xc0 | reg,
3378 contents + roff - 1);
3379 }
3380 else if (reg == 4)
3381 {
3382 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3383 is special */
3384 if (val == 0x4c)
3385 bfd_put_8 (output_bfd, 0x49,
3386 contents + roff - 3);
3387 else if (!ABI_64_P (output_bfd) && val == 0x44)
3388 bfd_put_8 (output_bfd, 0x41,
3389 contents + roff - 3);
3390 bfd_put_8 (output_bfd, 0x81,
3391 contents + roff - 2);
3392 bfd_put_8 (output_bfd, 0xc0 | reg,
3393 contents + roff - 1);
3394 }
3395 else
3396 {
3397 /* addq/addl -> leaq/leal */
3398 if (val == 0x4c)
3399 bfd_put_8 (output_bfd, 0x4d,
3400 contents + roff - 3);
3401 else if (!ABI_64_P (output_bfd) && val == 0x44)
3402 bfd_put_8 (output_bfd, 0x45,
3403 contents + roff - 3);
3404 bfd_put_8 (output_bfd, 0x8d,
3405 contents + roff - 2);
3406 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3407 contents + roff - 1);
3408 }
3409 bfd_put_32 (output_bfd,
3410 elf_x86_64_tpoff (info, relocation),
3411 contents + roff);
3412 continue;
3413 }
3414 else
3415 BFD_ASSERT (FALSE);
3416 }
3417
3418 if (htab->elf.sgot == NULL)
3419 abort ();
3420
3421 if (h != NULL)
3422 {
3423 off = h->got.offset;
3424 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3425 }
3426 else
3427 {
3428 if (local_got_offsets == NULL)
3429 abort ();
3430
3431 off = local_got_offsets[r_symndx];
3432 offplt = local_tlsdesc_gotents[r_symndx];
3433 }
3434
3435 if ((off & 1) != 0)
3436 off &= ~1;
3437 else
3438 {
3439 Elf_Internal_Rela outrel;
3440 int dr_type, indx;
3441 asection *sreloc;
3442
3443 if (htab->elf.srelgot == NULL)
3444 abort ();
3445
3446 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3447
3448 if (GOT_TLS_GDESC_P (tls_type))
3449 {
3450 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3451 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3452 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3453 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3454 + htab->elf.sgotplt->output_offset
3455 + offplt
3456 + htab->sgotplt_jump_table_size);
3457 sreloc = htab->elf.srelplt;
3458 if (indx == 0)
3459 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3460 else
3461 outrel.r_addend = 0;
3462 elf_append_rela (output_bfd, sreloc, &outrel);
3463 }
3464
3465 sreloc = htab->elf.srelgot;
3466
3467 outrel.r_offset = (htab->elf.sgot->output_section->vma
3468 + htab->elf.sgot->output_offset + off);
3469
3470 if (GOT_TLS_GD_P (tls_type))
3471 dr_type = R_X86_64_DTPMOD64;
3472 else if (GOT_TLS_GDESC_P (tls_type))
3473 goto dr_done;
3474 else
3475 dr_type = R_X86_64_TPOFF64;
3476
3477 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3478 outrel.r_addend = 0;
3479 if ((dr_type == R_X86_64_TPOFF64
3480 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3481 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3482 outrel.r_info = htab->r_info (indx, dr_type);
3483
3484 elf_append_rela (output_bfd, sreloc, &outrel);
3485
3486 if (GOT_TLS_GD_P (tls_type))
3487 {
3488 if (indx == 0)
3489 {
3490 BFD_ASSERT (! unresolved_reloc);
3491 bfd_put_64 (output_bfd,
3492 relocation - _bfd_x86_elf_dtpoff_base (info),
3493 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3494 }
3495 else
3496 {
3497 bfd_put_64 (output_bfd, 0,
3498 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3499 outrel.r_info = htab->r_info (indx,
3500 R_X86_64_DTPOFF64);
3501 outrel.r_offset += GOT_ENTRY_SIZE;
3502 elf_append_rela (output_bfd, sreloc,
3503 &outrel);
3504 }
3505 }
3506
3507 dr_done:
3508 if (h != NULL)
3509 h->got.offset |= 1;
3510 else
3511 local_got_offsets[r_symndx] |= 1;
3512 }
3513
3514 if (off >= (bfd_vma) -2
3515 && ! GOT_TLS_GDESC_P (tls_type))
3516 abort ();
3517 if (r_type_tls == r_type)
3518 {
3519 if (r_type == R_X86_64_GOTPC32_TLSDESC
3520 || r_type == R_X86_64_TLSDESC_CALL)
3521 relocation = htab->elf.sgotplt->output_section->vma
3522 + htab->elf.sgotplt->output_offset
3523 + offplt + htab->sgotplt_jump_table_size;
3524 else
3525 relocation = htab->elf.sgot->output_section->vma
3526 + htab->elf.sgot->output_offset + off;
3527 unresolved_reloc = FALSE;
3528 }
3529 else
3530 {
3531 bfd_vma roff = rel->r_offset;
3532
3533 if (r_type == R_X86_64_TLSGD)
3534 {
3535 /* GD->IE transition. For 64bit, change
3536 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3537 .word 0x6666; rex64; call __tls_get_addr@PLT
3538 or
3539 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3540 .byte 0x66; rex64
3541 call *__tls_get_addr@GOTPCREL(%rip)
3542 which may be converted to
3543 addr32 call __tls_get_addr
3544 into:
3545 movq %fs:0, %rax
3546 addq foo@gottpoff(%rip), %rax
3547 For 32bit, change
3548 leaq foo@tlsgd(%rip), %rdi
3549 .word 0x6666; rex64; call __tls_get_addr@PLT
3550 or
3551 leaq foo@tlsgd(%rip), %rdi
3552 .byte 0x66; rex64;
3553 call *__tls_get_addr@GOTPCREL(%rip)
3554 which may be converted to
3555 addr32 call __tls_get_addr
3556 into:
3557 movl %fs:0, %eax
3558 addq foo@gottpoff(%rip), %rax
3559 For largepic, change:
3560 leaq foo@tlsgd(%rip), %rdi
3561 movabsq $__tls_get_addr@pltoff, %rax
3562 addq %r15, %rax
3563 call *%rax
3564 into:
3565 movq %fs:0, %rax
3566 addq foo@gottpoff(%rax), %rax
3567 nopw 0x0(%rax,%rax,1) */
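/* In the byte sequences below, 64 48 8b 04 25 imm32 encodes
   "movq %fs:imm32, %rax" (imm32 is 0 here) and 48 03 05 disp32
   encodes "addq disp32(%rip), %rax"; the disp32 is filled in just
   below with the PC-relative distance to the @gottpoff GOT entry.  */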
3568 int largepic = 0;
3569 if (ABI_64_P (output_bfd))
3570 {
3571 if (contents[roff + 5] == 0xb8)
3572 {
3573 memcpy (contents + roff - 3,
3574 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3575 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3576 largepic = 1;
3577 }
3578 else
3579 memcpy (contents + roff - 4,
3580 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3581 16);
3582 }
3583 else
3584 memcpy (contents + roff - 3,
3585 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3586 15);
3587
3588 relocation = (htab->elf.sgot->output_section->vma
3589 + htab->elf.sgot->output_offset + off
3590 - roff
3591 - largepic
3592 - input_section->output_section->vma
3593 - input_section->output_offset
3594 - 12);
3595 bfd_put_32 (output_bfd, relocation,
3596 contents + roff + 8 + largepic);
3597 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3598 rel++;
3599 wrel++;
3600 continue;
3601 }
3602 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3603 {
3604 /* GDesc -> IE transition.
3605 It's originally something like:
3606 leaq x@tlsdesc(%rip), %rax
3607
3608 Change it to:
3609 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3610
3611 /* Now modify the instruction as appropriate. To
3612 turn a leaq into a movq in the form we use it, it
3613 suffices to change the second byte from 0x8d to
3614 0x8b. */
3615 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3616
3617 bfd_put_32 (output_bfd,
3618 htab->elf.sgot->output_section->vma
3619 + htab->elf.sgot->output_offset + off
3620 - rel->r_offset
3621 - input_section->output_section->vma
3622 - input_section->output_offset
3623 - 4,
3624 contents + roff);
3625 continue;
3626 }
3627 else if (r_type == R_X86_64_TLSDESC_CALL)
3628 {
3629 /* GDesc -> IE transition.
3630 It's originally:
3631 call *(%rax)
3632
3633 Change it to:
3634 xchg %ax, %ax. */
3635
3636 bfd_put_8 (output_bfd, 0x66, contents + roff);
3637 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3638 continue;
3639 }
3640 else
3641 BFD_ASSERT (FALSE);
3642 }
3643 break;
3644
3645 case R_X86_64_TLSLD:
3646 if (! elf_x86_64_tls_transition (info, input_bfd,
3647 input_section, contents,
3648 symtab_hdr, sym_hashes,
3649 &r_type, GOT_UNKNOWN, rel,
3650 relend, h, r_symndx, TRUE))
3651 return FALSE;
3652
3653 if (r_type != R_X86_64_TLSLD)
3654 {
3655 /* LD->LE transition:
3656 leaq foo@tlsld(%rip), %rdi
3657 call __tls_get_addr@PLT
3658 For 64bit, we change it into:
3659 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3660 For 32bit, we change it into:
3661 nopl 0x0(%rax); movl %fs:0, %eax
3662 Or
3663 leaq foo@tlsld(%rip), %rdi;
3664 call *__tls_get_addr@GOTPCREL(%rip)
3665 which may be converted to
3666 addr32 call __tls_get_addr
3667 For 64bit, we change it into:
3668 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3669 For 32bit, we change it into:
3670 nopw 0x0(%rax); movl %fs:0, %eax
3671 For largepic, change:
3672 leaq foo@tlsgd(%rip), %rdi
3673 movabsq $__tls_get_addr@pltoff, %rax
3674 addq %rbx, %rax
3675 call *%rax
3676 into
3677 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3678 movq %fs:0, %rax */
3679
3680 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3681 if (ABI_64_P (output_bfd))
3682 {
3683 if (contents[rel->r_offset + 5] == 0xb8)
3684 memcpy (contents + rel->r_offset - 3,
3685 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3686 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3687 else if (contents[rel->r_offset + 4] == 0xff
3688 || contents[rel->r_offset + 4] == 0x67)
3689 memcpy (contents + rel->r_offset - 3,
3690 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3691 13);
3692 else
3693 memcpy (contents + rel->r_offset - 3,
3694 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3695 }
3696 else
3697 {
3698 if (contents[rel->r_offset + 4] == 0xff)
3699 memcpy (contents + rel->r_offset - 3,
3700 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3701 13);
3702 else
3703 memcpy (contents + rel->r_offset - 3,
3704 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3705 }
3706 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3707 and R_X86_64_PLTOFF64. */
3708 rel++;
3709 wrel++;
3710 continue;
3711 }
3712
3713 if (htab->elf.sgot == NULL)
3714 abort ();
3715
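/* Bit 0 of tls_ld_or_ldm_got.offset records whether the shared
   R_X86_64_DTPMOD64 GOT entry for LD/LDM sequences has already been
   created; the real GOT offset is the value with that bit cleared.  */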
3716 off = htab->tls_ld_or_ldm_got.offset;
3717 if (off & 1)
3718 off &= ~1;
3719 else
3720 {
3721 Elf_Internal_Rela outrel;
3722
3723 if (htab->elf.srelgot == NULL)
3724 abort ();
3725
3726 outrel.r_offset = (htab->elf.sgot->output_section->vma
3727 + htab->elf.sgot->output_offset + off);
3728
3729 bfd_put_64 (output_bfd, 0,
3730 htab->elf.sgot->contents + off);
3731 bfd_put_64 (output_bfd, 0,
3732 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3733 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3734 outrel.r_addend = 0;
3735 elf_append_rela (output_bfd, htab->elf.srelgot,
3736 &outrel);
3737 htab->tls_ld_or_ldm_got.offset |= 1;
3738 }
3739 relocation = htab->elf.sgot->output_section->vma
3740 + htab->elf.sgot->output_offset + off;
3741 unresolved_reloc = FALSE;
3742 break;
3743
3744 case R_X86_64_DTPOFF32:
3745 if (!bfd_link_executable (info)
3746 || (input_section->flags & SEC_CODE) == 0)
3747 relocation -= _bfd_x86_elf_dtpoff_base (info);
3748 else
3749 relocation = elf_x86_64_tpoff (info, relocation);
3750 break;
3751
3752 case R_X86_64_TPOFF32:
3753 case R_X86_64_TPOFF64:
3754 BFD_ASSERT (bfd_link_executable (info));
3755 relocation = elf_x86_64_tpoff (info, relocation);
3756 break;
3757
3758 case R_X86_64_DTPOFF64:
3759 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3760 relocation -= _bfd_x86_elf_dtpoff_base (info);
3761 break;
3762
3763 default:
3764 break;
3765 }
3766
3767 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3768 because such sections are not SEC_ALLOC and thus ld.so will
3769 not process them. */
3770 if (unresolved_reloc
3771 && !((input_section->flags & SEC_DEBUGGING) != 0
3772 && h->def_dynamic)
3773 && _bfd_elf_section_offset (output_bfd, info, input_section,
3774 rel->r_offset) != (bfd_vma) -1)
3775 {
3776 switch (r_type)
3777 {
3778 case R_X86_64_32S:
3779 sec = h->root.u.def.section;
3780 if ((info->nocopyreloc
3781 || (eh->def_protected
3782 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3783 && !(h->root.u.def.section->flags & SEC_CODE))
3784 return elf_x86_64_need_pic (info, input_bfd, input_section,
3785 h, NULL, NULL, howto);
3786 /* Fall through. */
3787
3788 default:
3789 _bfd_error_handler
3790 /* xgettext:c-format */
3791 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3792 input_bfd,
3793 input_section,
3794 rel->r_offset,
3795 howto->name,
3796 h->root.root.string);
3797 return FALSE;
3798 }
3799 }
3800
3801 do_relocation:
3802 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3803 contents, rel->r_offset,
3804 relocation, rel->r_addend);
3805
3806 check_relocation_error:
3807 if (r != bfd_reloc_ok)
3808 {
3809 const char *name;
3810
3811 if (h != NULL)
3812 name = h->root.root.string;
3813 else
3814 {
3815 name = bfd_elf_string_from_elf_section (input_bfd,
3816 symtab_hdr->sh_link,
3817 sym->st_name);
3818 if (name == NULL)
3819 return FALSE;
3820 if (*name == '\0')
3821 name = bfd_section_name (input_bfd, sec);
3822 }
3823
3824 if (r == bfd_reloc_overflow)
3825 {
3826 if (converted_reloc)
3827 {
3828 info->callbacks->einfo
3829 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3830 return FALSE;
3831 }
3832 (*info->callbacks->reloc_overflow)
3833 (info, (h ? &h->root : NULL), name, howto->name,
3834 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3835 }
3836 else
3837 {
3838 _bfd_error_handler
3839 /* xgettext:c-format */
3840 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3841 input_bfd, input_section,
3842 rel->r_offset, name, (int) r);
3843 return FALSE;
3844 }
3845 }
3846
3847 if (wrel != rel)
3848 *wrel = *rel;
3849 }
3850
3851 if (wrel != rel)
3852 {
3853 Elf_Internal_Shdr *rel_hdr;
3854 size_t deleted = rel - wrel;
3855
3856 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3857 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3858 if (rel_hdr->sh_size == 0)
3859 {
3860 /* It is too late to remove an empty reloc section. Leave
3861 one NONE reloc.
3862 ??? What is wrong with an empty section??? */
3863 rel_hdr->sh_size = rel_hdr->sh_entsize;
3864 deleted -= 1;
3865 }
3866 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3867 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3868 input_section->reloc_count -= deleted;
3869 }
3870
3871 return TRUE;
3872 }
3873
3874 /* Finish up dynamic symbol handling. We set the contents of various
3875 dynamic sections here. */
3876
3877 static bfd_boolean
3878 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3879 struct bfd_link_info *info,
3880 struct elf_link_hash_entry *h,
3881 Elf_Internal_Sym *sym)
3882 {
3883 struct elf_x86_link_hash_table *htab;
3884 bfd_boolean use_plt_second;
3885 struct elf_x86_link_hash_entry *eh;
3886 bfd_boolean local_undefweak;
3887
3888 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3889 if (htab == NULL)
3890 return FALSE;
3891
3892 /* Use the second PLT section only if there is a .plt section. */
3893 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3894
3895 eh = (struct elf_x86_link_hash_entry *) h;
3896 if (eh->no_finish_dynamic_symbol)
3897 abort ();
3898
3899 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3900 resolved undefined weak symbols in executables so that their
3901 references have value 0 at run-time. */
3902 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3903
3904 if (h->plt.offset != (bfd_vma) -1)
3905 {
3906 bfd_vma plt_index;
3907 bfd_vma got_offset, plt_offset;
3908 Elf_Internal_Rela rela;
3909 bfd_byte *loc;
3910 asection *plt, *gotplt, *relplt, *resolved_plt;
3911 const struct elf_backend_data *bed;
3912 bfd_vma plt_got_pcrel_offset;
3913
3914 /* When building a static executable, use .iplt, .igot.plt and
3915 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3916 if (htab->elf.splt != NULL)
3917 {
3918 plt = htab->elf.splt;
3919 gotplt = htab->elf.sgotplt;
3920 relplt = htab->elf.srelplt;
3921 }
3922 else
3923 {
3924 plt = htab->elf.iplt;
3925 gotplt = htab->elf.igotplt;
3926 relplt = htab->elf.irelplt;
3927 }
3928
3929 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3930
3931 /* Get the index in the procedure linkage table which
3932 corresponds to this symbol. This is the index of this symbol
3933 in all the symbols for which we are making plt entries. The
3934 first entry in the procedure linkage table is reserved.
3935
3936 Get the offset into the .got table of the entry that
3937 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3938 bytes. The first three are reserved for the dynamic linker.
3939
3940 For static executables, we don't reserve anything. */
3941
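/* A worked example, assuming a lazy .plt with PLT0, 16-byte PLT entries and
   GOT_ENTRY_SIZE == 8: the entry at h->plt.offset == 16 is PLT index 1, so
   got_offset becomes (16 / 16 - 1 + 3) * 8 == 24, i.e. the first .got.plt
   slot after the three reserved ones.  */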
3942 if (plt == htab->elf.splt)
3943 {
3944 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3945 - htab->plt.has_plt0);
3946 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3947 }
3948 else
3949 {
3950 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3951 got_offset = got_offset * GOT_ENTRY_SIZE;
3952 }
3953
3954 /* Fill in the entry in the procedure linkage table. */
3955 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3956 htab->plt.plt_entry_size);
3957 if (use_plt_second)
3958 {
3959 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3960 htab->non_lazy_plt->plt_entry,
3961 htab->non_lazy_plt->plt_entry_size);
3962
3963 resolved_plt = htab->plt_second;
3964 plt_offset = eh->plt_second.offset;
3965 }
3966 else
3967 {
3968 resolved_plt = plt;
3969 plt_offset = h->plt.offset;
3970 }
3971
3972 /* Now fill in the variable fields of the PLT entry. */
3973
3974 /* Put in the PC-relative offset of the GOT entry, measured from the end
3975 of the instruction that refers to it (hence the size subtraction). */
3976 plt_got_pcrel_offset = (gotplt->output_section->vma
3977 + gotplt->output_offset
3978 + got_offset
3979 - resolved_plt->output_section->vma
3980 - resolved_plt->output_offset
3981 - plt_offset
3982 - htab->plt.plt_got_insn_size);
3983
3984 /* Check PC-relative offset overflow in PLT entry. */
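/* Adding 0x80000000 and comparing against 0xffffffff tests whether the
   offset fits a signed 32-bit displacement, i.e. lies in
   [-0x80000000, 0x7fffffff].  */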
3985 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3986 /* xgettext:c-format */
3987 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
3988 output_bfd, h->root.root.string);
3989
3990 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3991 (resolved_plt->contents + plt_offset
3992 + htab->plt.plt_got_offset));
3993
3994 /* Fill in the entry in the global offset table, initially this
3995 points to the second part of the PLT entry. Leave the entry
3996 as zero for an undefined weak symbol in PIE; no PLT relocation is
3997 generated against an undefined weak symbol in PIE. */
3998 if (!local_undefweak)
3999 {
4000 if (htab->plt.has_plt0)
4001 bfd_put_64 (output_bfd, (plt->output_section->vma
4002 + plt->output_offset
4003 + h->plt.offset
4004 + htab->lazy_plt->plt_lazy_offset),
4005 gotplt->contents + got_offset);
4006
4007 /* Fill in the entry in the .rela.plt section. */
4008 rela.r_offset = (gotplt->output_section->vma
4009 + gotplt->output_offset
4010 + got_offset);
4011 if (PLT_LOCAL_IFUNC_P (info, h))
4012 {
4013 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4014 h->root.root.string,
4015 h->root.u.def.section->owner);
4016
4017 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4018 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4019 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4020 rela.r_addend = (h->root.u.def.value
4021 + h->root.u.def.section->output_section->vma
4022 + h->root.u.def.section->output_offset);
4023 /* R_X86_64_IRELATIVE comes last. */
4024 plt_index = htab->next_irelative_index--;
4025 }
4026 else
4027 {
4028 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4029 rela.r_addend = 0;
4030 plt_index = htab->next_jump_slot_index++;
4031 }
4032
4033 /* Don't fill in the second and third slots of the PLT entry for
4034 static executables or when there is no PLT0. */
4035 if (plt == htab->elf.splt && htab->plt.has_plt0)
4036 {
4037 bfd_vma plt0_offset
4038 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4039
4040 /* Put relocation index. */
4041 bfd_put_32 (output_bfd, plt_index,
4042 (plt->contents + h->plt.offset
4043 + htab->lazy_plt->plt_reloc_offset));
4044
4045 /* Put in the offset for jmp .PLT0 and check for overflow. We don't
4046 check the relocation index for overflow since the branch
4047 displacement will overflow first. */
4048 if (plt0_offset > 0x80000000)
4049 /* xgettext:c-format */
4050 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4051 output_bfd, h->root.root.string);
4052 bfd_put_32 (output_bfd, - plt0_offset,
4053 (plt->contents + h->plt.offset
4054 + htab->lazy_plt->plt_plt_offset));
4055 }
4056
4057 bed = get_elf_backend_data (output_bfd);
4058 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4059 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4060 }
4061 }
4062 else if (eh->plt_got.offset != (bfd_vma) -1)
4063 {
4064 bfd_vma got_offset, plt_offset;
4065 asection *plt, *got;
4066 bfd_boolean got_after_plt;
4067 int32_t got_pcrel_offset;
4068
4069 /* Set the entry in the GOT procedure linkage table. */
4070 plt = htab->plt_got;
4071 got = htab->elf.sgot;
4072 got_offset = h->got.offset;
4073
4074 if (got_offset == (bfd_vma) -1
4075 || (h->type == STT_GNU_IFUNC && h->def_regular)
4076 || plt == NULL
4077 || got == NULL)
4078 abort ();
4079
4080 /* Use the non-lazy PLT entry template for the GOT PLT since they
4081 are identical. */
4082 /* Fill in the entry in the GOT procedure linkage table. */
4083 plt_offset = eh->plt_got.offset;
4084 memcpy (plt->contents + plt_offset,
4085 htab->non_lazy_plt->plt_entry,
4086 htab->non_lazy_plt->plt_entry_size);
4087
4088 /* Put in the PC-relative offset of the GOT entry, measured from the end
4089 of the instruction that refers to it (hence the size subtraction). */
4090 got_pcrel_offset = (got->output_section->vma
4091 + got->output_offset
4092 + got_offset
4093 - plt->output_section->vma
4094 - plt->output_offset
4095 - plt_offset
4096 - htab->non_lazy_plt->plt_got_insn_size);
4097
4098 /* Check PC-relative offset overflow in GOT PLT entry. */
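/* got_pcrel_offset is only 32 bits wide; when the GOT lies above the PLT
   the true displacement must be positive (and negative otherwise), so a
   value with the wrong sign means the displacement wrapped around
   32 bits.  */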
4099 got_after_plt = got->output_section->vma > plt->output_section->vma;
4100 if ((got_after_plt && got_pcrel_offset < 0)
4101 || (!got_after_plt && got_pcrel_offset > 0))
4102 /* xgettext:c-format */
4103 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4104 output_bfd, h->root.root.string);
4105
4106 bfd_put_32 (output_bfd, got_pcrel_offset,
4107 (plt->contents + plt_offset
4108 + htab->non_lazy_plt->plt_got_offset));
4109 }
4110
4111 if (!local_undefweak
4112 && !h->def_regular
4113 && (h->plt.offset != (bfd_vma) -1
4114 || eh->plt_got.offset != (bfd_vma) -1))
4115 {
4116 /* Mark the symbol as undefined, rather than as defined in
4117 the .plt section. Leave the value if there were any
4118 relocations where pointer equality matters (this is a clue
4119 for the dynamic linker, to make function pointer
4120 comparisons work between an application and shared
4121 library), otherwise set it to zero. If a function is only
4122 called from a binary, there is no need to slow down
4123 shared libraries because of that. */
4124 sym->st_shndx = SHN_UNDEF;
4125 if (!h->pointer_equality_needed)
4126 sym->st_value = 0;
4127 }
4128
4129 /* Don't generate a dynamic GOT relocation against an undefined weak
4130 symbol in an executable. */
4131 if (h->got.offset != (bfd_vma) -1
4132 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4133 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4134 && !local_undefweak)
4135 {
4136 Elf_Internal_Rela rela;
4137 asection *relgot = htab->elf.srelgot;
4138
4139 /* This symbol has an entry in the global offset table. Set it
4140 up. */
4141 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4142 abort ();
4143
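/* Bit 0 of h->got.offset is used as a flag meaning that the GOT entry was
   already initialized in relocate_section; mask it off to get the real
   offset.  */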
4144 rela.r_offset = (htab->elf.sgot->output_section->vma
4145 + htab->elf.sgot->output_offset
4146 + (h->got.offset &~ (bfd_vma) 1));
4147
4148 /* If this is a static link, or it is a -Bsymbolic link and the
4149 symbol is defined locally or was forced to be local because
4150 of a version file, we just want to emit a RELATIVE reloc.
4151 The entry in the global offset table will already have been
4152 initialized in the relocate_section function. */
4153 if (h->def_regular
4154 && h->type == STT_GNU_IFUNC)
4155 {
4156 if (h->plt.offset == (bfd_vma) -1)
4157 {
4158 /* STT_GNU_IFUNC is referenced without PLT. */
4159 if (htab->elf.splt == NULL)
4160 {
4161 /* Use the .rel[a].iplt section to store .got relocations
4162 in a static executable. */
4163 relgot = htab->elf.irelplt;
4164 }
4165 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4166 {
4167 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4168 h->root.root.string,
4169 h->root.u.def.section->owner);
4170
4171 rela.r_info = htab->r_info (0,
4172 R_X86_64_IRELATIVE);
4173 rela.r_addend = (h->root.u.def.value
4174 + h->root.u.def.section->output_section->vma
4175 + h->root.u.def.section->output_offset);
4176 }
4177 else
4178 goto do_glob_dat;
4179 }
4180 else if (bfd_link_pic (info))
4181 {
4182 /* Generate R_X86_64_GLOB_DAT. */
4183 goto do_glob_dat;
4184 }
4185 else
4186 {
4187 asection *plt;
4188 bfd_vma plt_offset;
4189
4190 if (!h->pointer_equality_needed)
4191 abort ();
4192
4193 /* For a non-shared object, we can't use .got.plt, which
4194 contains the real function address, if we need pointer
4195 equality. We load the GOT entry with the PLT entry instead. */
4196 if (htab->plt_second != NULL)
4197 {
4198 plt = htab->plt_second;
4199 plt_offset = eh->plt_second.offset;
4200 }
4201 else
4202 {
4203 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4204 plt_offset = h->plt.offset;
4205 }
4206 bfd_put_64 (output_bfd, (plt->output_section->vma
4207 + plt->output_offset
4208 + plt_offset),
4209 htab->elf.sgot->contents + h->got.offset);
4210 return TRUE;
4211 }
4212 }
4213 else if (bfd_link_pic (info)
4214 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4215 {
4216 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4217 return FALSE;
4218 BFD_ASSERT((h->got.offset & 1) != 0);
4219 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4220 rela.r_addend = (h->root.u.def.value
4221 + h->root.u.def.section->output_section->vma
4222 + h->root.u.def.section->output_offset);
4223 }
4224 else
4225 {
4226 BFD_ASSERT((h->got.offset & 1) == 0);
4227 do_glob_dat:
4228 bfd_put_64 (output_bfd, (bfd_vma) 0,
4229 htab->elf.sgot->contents + h->got.offset);
4230 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4231 rela.r_addend = 0;
4232 }
4233
4234 elf_append_rela (output_bfd, relgot, &rela);
4235 }
4236
4237 if (h->needs_copy)
4238 {
4239 Elf_Internal_Rela rela;
4240 asection *s;
4241
4242 /* This symbol needs a copy reloc. Set it up. */
4243 VERIFY_COPY_RELOC (h, htab)
4244
4245 rela.r_offset = (h->root.u.def.value
4246 + h->root.u.def.section->output_section->vma
4247 + h->root.u.def.section->output_offset);
4248 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4249 rela.r_addend = 0;
4250 if (h->root.u.def.section == htab->elf.sdynrelro)
4251 s = htab->elf.sreldynrelro;
4252 else
4253 s = htab->elf.srelbss;
4254 elf_append_rela (output_bfd, s, &rela);
4255 }
4256
4257 return TRUE;
4258 }
4259
4260 /* Finish up local dynamic symbol handling. We set the contents of
4261 various dynamic sections here. */
4262
4263 static bfd_boolean
4264 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4265 {
4266 struct elf_link_hash_entry *h
4267 = (struct elf_link_hash_entry *) *slot;
4268 struct bfd_link_info *info
4269 = (struct bfd_link_info *) inf;
4270
4271 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4272 info, h, NULL);
4273 }
4274
4275 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT
4276 entry here, since an undefined weak symbol may not be dynamic and so
4277 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4278
4279 static bfd_boolean
4280 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4281 void *inf)
4282 {
4283 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4284 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4285
4286 if (h->root.type != bfd_link_hash_undefweak
4287 || h->dynindx != -1)
4288 return TRUE;
4289
4290 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4291 info, h, NULL);
4292 }
4293
4294 /* Used to decide how to sort relocs in an optimal manner for the
4295 dynamic linker, before writing them out. */
4296
4297 static enum elf_reloc_type_class
4298 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4299 const asection *rel_sec ATTRIBUTE_UNUSED,
4300 const Elf_Internal_Rela *rela)
4301 {
4302 bfd *abfd = info->output_bfd;
4303 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4304 struct elf_x86_link_hash_table *htab
4305 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4306
4307 if (htab->elf.dynsym != NULL
4308 && htab->elf.dynsym->contents != NULL)
4309 {
4310 /* Check relocation against STT_GNU_IFUNC symbol if there are
4311 dynamic symbols. */
4312 unsigned long r_symndx = htab->r_sym (rela->r_info);
4313 if (r_symndx != STN_UNDEF)
4314 {
4315 Elf_Internal_Sym sym;
4316 if (!bed->s->swap_symbol_in (abfd,
4317 (htab->elf.dynsym->contents
4318 + r_symndx * bed->s->sizeof_sym),
4319 0, &sym))
4320 abort ();
4321
4322 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4323 return reloc_class_ifunc;
4324 }
4325 }
4326
4327 switch ((int) ELF32_R_TYPE (rela->r_info))
4328 {
4329 case R_X86_64_IRELATIVE:
4330 return reloc_class_ifunc;
4331 case R_X86_64_RELATIVE:
4332 case R_X86_64_RELATIVE64:
4333 return reloc_class_relative;
4334 case R_X86_64_JUMP_SLOT:
4335 return reloc_class_plt;
4336 case R_X86_64_COPY:
4337 return reloc_class_copy;
4338 default:
4339 return reloc_class_normal;
4340 }
4341 }
4342
4343 /* Finish up the dynamic sections. */
4344
4345 static bfd_boolean
4346 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4347 struct bfd_link_info *info)
4348 {
4349 struct elf_x86_link_hash_table *htab;
4350
4351 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4352 if (htab == NULL)
4353 return FALSE;
4354
4355 if (! htab->elf.dynamic_sections_created)
4356 return TRUE;
4357
4358 if (htab->elf.splt && htab->elf.splt->size > 0)
4359 {
4360 elf_section_data (htab->elf.splt->output_section)
4361 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4362
4363 if (htab->plt.has_plt0)
4364 {
4365 /* Fill in the special first entry in the procedure linkage
4366 table. */
4367 memcpy (htab->elf.splt->contents,
4368 htab->lazy_plt->plt0_entry,
4369 htab->lazy_plt->plt0_entry_size);
4370 /* Put in the offset to GOT+8 for the pushq GOT+8(%rip) instruction;
4371 the offset is relative to the end of that 6-byte instruction. */
4372 bfd_put_32 (output_bfd,
4373 (htab->elf.sgotplt->output_section->vma
4374 + htab->elf.sgotplt->output_offset
4375 + 8
4376 - htab->elf.splt->output_section->vma
4377 - htab->elf.splt->output_offset
4378 - 6),
4379 (htab->elf.splt->contents
4380 + htab->lazy_plt->plt0_got1_offset));
4381 /* Add offset for the PC-relative instruction accessing
4382 GOT+16, subtracting the offset to the end of that
4383 instruction. */
4384 bfd_put_32 (output_bfd,
4385 (htab->elf.sgotplt->output_section->vma
4386 + htab->elf.sgotplt->output_offset
4387 + 16
4388 - htab->elf.splt->output_section->vma
4389 - htab->elf.splt->output_offset
4390 - htab->lazy_plt->plt0_got2_insn_end),
4391 (htab->elf.splt->contents
4392 + htab->lazy_plt->plt0_got2_offset));
4393 }
4394
4395 if (htab->tlsdesc_plt)
4396 {
4397 bfd_put_64 (output_bfd, (bfd_vma) 0,
4398 htab->elf.sgot->contents + htab->tlsdesc_got);
4399
4400 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4401 htab->lazy_plt->plt0_entry,
4402 htab->lazy_plt->plt0_entry_size);
4403
4404 /* Put in the offset to GOT+8 for the pushq GOT+8(%rip)
4405 instruction; the offset is relative to the end of that 6-byte instruction. */
4406 bfd_put_32 (output_bfd,
4407 (htab->elf.sgotplt->output_section->vma
4408 + htab->elf.sgotplt->output_offset
4409 + 8
4410 - htab->elf.splt->output_section->vma
4411 - htab->elf.splt->output_offset
4412 - htab->tlsdesc_plt
4413 - 6),
4414 (htab->elf.splt->contents
4415 + htab->tlsdesc_plt
4416 + htab->lazy_plt->plt0_got1_offset));
4417 /* Add offset for the PC-relative instruction accessing
4418 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4419 subtracting the offset to the end of that
4420 instruction. */
4421 bfd_put_32 (output_bfd,
4422 (htab->elf.sgot->output_section->vma
4423 + htab->elf.sgot->output_offset
4424 + htab->tlsdesc_got
4425 - htab->elf.splt->output_section->vma
4426 - htab->elf.splt->output_offset
4427 - htab->tlsdesc_plt
4428 - htab->lazy_plt->plt0_got2_insn_end),
4429 (htab->elf.splt->contents
4430 + htab->tlsdesc_plt
4431 + htab->lazy_plt->plt0_got2_offset));
4432 }
4433 }
4434
4435 /* Fill PLT entries for undefined weak symbols in PIE. */
4436 if (bfd_link_pie (info))
4437 bfd_hash_traverse (&info->hash->table,
4438 elf_x86_64_pie_finish_undefweak_symbol,
4439 info);
4440
4441 return TRUE;
4442 }
4443
4444 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4445 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4446 It has to be done before elf_link_sort_relocs is called so that
4447 dynamic relocations are properly sorted. */
4448
4449 static bfd_boolean
4450 elf_x86_64_output_arch_local_syms
4451 (bfd *output_bfd ATTRIBUTE_UNUSED,
4452 struct bfd_link_info *info,
4453 void *flaginfo ATTRIBUTE_UNUSED,
4454 int (*func) (void *, const char *,
4455 Elf_Internal_Sym *,
4456 asection *,
4457 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4458 {
4459 struct elf_x86_link_hash_table *htab
4460 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4461 if (htab == NULL)
4462 return FALSE;
4463
4464 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4465 htab_traverse (htab->loc_hash_table,
4466 elf_x86_64_finish_local_dynamic_symbol,
4467 info);
4468
4469 return TRUE;
4470 }
4471
4472 /* Forward declaration. */
4473 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4474
4475 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4476 dynamic relocations. */
4477
4478 static long
4479 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4480 long symcount ATTRIBUTE_UNUSED,
4481 asymbol **syms ATTRIBUTE_UNUSED,
4482 long dynsymcount,
4483 asymbol **dynsyms,
4484 asymbol **ret)
4485 {
4486 long count, i, n;
4487 int j;
4488 bfd_byte *plt_contents;
4489 long relsize;
4490 const struct elf_x86_lazy_plt_layout *lazy_plt;
4491 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4492 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4493 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4494 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4495 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4496 asection *plt;
4497 enum elf_x86_plt_type plt_type;
4498 struct elf_x86_plt plts[] =
4499 {
4500 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4501 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4502 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4503 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4504 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4505 };
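/* Each element above is, roughly: section name, section, contents, PLT
   type, and the per-entry layout fields (plt_got_offset, plt_got_insn_size,
   plt_entry_size, count) filled in below once the PLT flavour has been
   identified.  */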
4506
4507 *ret = NULL;
4508
4509 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4510 return 0;
4511
4512 if (dynsymcount <= 0)
4513 return 0;
4514
4515 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4516 if (relsize <= 0)
4517 return -1;
4518
4519 if (get_elf_x86_backend_data (abfd)->target_os == is_normal)
4520 {
4521 lazy_plt = &elf_x86_64_lazy_plt;
4522 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4523 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4524 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4525 if (ABI_64_P (abfd))
4526 {
4527 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4528 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4529 }
4530 else
4531 {
4532 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4533 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4534 }
4535 }
4536 else
4537 {
4538 lazy_plt = &elf_x86_64_nacl_plt;
4539 non_lazy_plt = NULL;
4540 lazy_bnd_plt = NULL;
4541 non_lazy_bnd_plt = NULL;
4542 lazy_ibt_plt = NULL;
4543 non_lazy_ibt_plt = NULL;
4544 }
4545
4546 count = 0;
4547 for (j = 0; plts[j].name != NULL; j++)
4548 {
4549 plt = bfd_get_section_by_name (abfd, plts[j].name);
4550 if (plt == NULL || plt->size == 0)
4551 continue;
4552
4553 /* Get the PLT section contents. */
4554 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4555 if (plt_contents == NULL)
4556 break;
4557 if (!bfd_get_section_contents (abfd, (asection *) plt,
4558 plt_contents, 0, plt->size))
4559 {
4560 free (plt_contents);
4561 break;
4562 }
4563
4564 /* Check what kind of PLT it is. */
4565 plt_type = plt_unknown;
4566 if (plts[j].type == plt_unknown
4567 && (plt->size >= (lazy_plt->plt_entry_size
4568 + lazy_plt->plt_entry_size)))
4569 {
4570 /* Match lazy PLT first. Need to check the first two
4571 instructions. */
4572 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4573 lazy_plt->plt0_got1_offset) == 0)
4574 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4575 2) == 0))
4576 plt_type = plt_lazy;
4577 else if (lazy_bnd_plt != NULL
4578 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4579 lazy_bnd_plt->plt0_got1_offset) == 0)
4580 && (memcmp (plt_contents + 6,
4581 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4582 {
4583 plt_type = plt_lazy | plt_second;
4584 /* The first entry in the lazy IBT PLT is the same as in the
4585 lazy BND PLT. */
4586 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4587 lazy_ibt_plt->plt_entry,
4588 lazy_ibt_plt->plt_got_offset) == 0))
4589 lazy_plt = lazy_ibt_plt;
4590 else
4591 lazy_plt = lazy_bnd_plt;
4592 }
4593 }
4594
4595 if (non_lazy_plt != NULL
4596 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4597 && plt->size >= non_lazy_plt->plt_entry_size)
4598 {
4599 /* Match non-lazy PLT. */
4600 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4601 non_lazy_plt->plt_got_offset) == 0)
4602 plt_type = plt_non_lazy;
4603 }
4604
4605 if (plt_type == plt_unknown || plt_type == plt_second)
4606 {
4607 if (non_lazy_bnd_plt != NULL
4608 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4609 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4610 non_lazy_bnd_plt->plt_got_offset) == 0))
4611 {
4612 /* Match BND PLT. */
4613 plt_type = plt_second;
4614 non_lazy_plt = non_lazy_bnd_plt;
4615 }
4616 else if (non_lazy_ibt_plt != NULL
4617 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4618 && (memcmp (plt_contents,
4619 non_lazy_ibt_plt->plt_entry,
4620 non_lazy_ibt_plt->plt_got_offset) == 0))
4621 {
4622 /* Match IBT PLT. */
4623 plt_type = plt_second;
4624 non_lazy_plt = non_lazy_ibt_plt;
4625 }
4626 }
4627
4628 if (plt_type == plt_unknown)
4629 {
4630 free (plt_contents);
4631 continue;
4632 }
4633
4634 plts[j].sec = plt;
4635 plts[j].type = plt_type;
4636
4637 if ((plt_type & plt_lazy))
4638 {
4639 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4640 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4641 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4642 /* Skip PLT0 in lazy PLT. */
4643 i = 1;
4644 }
4645 else
4646 {
4647 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4648 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4649 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4650 i = 0;
4651 }
4652
4653 /* Skip lazy PLT when the second PLT is used. */
4654 if (plt_type == (plt_lazy | plt_second))
4655 plts[j].count = 0;
4656 else
4657 {
4658 n = plt->size / plts[j].plt_entry_size;
4659 plts[j].count = n;
4660 count += n - i;
4661 }
4662
4663 plts[j].contents = plt_contents;
4664 }
4665
4666 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4667 (bfd_vma) 0, plts, dynsyms,
4668 ret);
4669 }
4670
4671 /* Handle an x86-64 specific section when reading an object file. This
4672 is called when elfcode.h finds a section with an unknown type. */
4673
4674 static bfd_boolean
4675 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4676 const char *name, int shindex)
4677 {
4678 if (hdr->sh_type != SHT_X86_64_UNWIND)
4679 return FALSE;
4680
4681 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4682 return FALSE;
4683
4684 return TRUE;
4685 }
4686
4687 /* Hook called by the linker routine which adds symbols from an object
4688 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4689 of .bss. */
4690
4691 static bfd_boolean
4692 elf_x86_64_add_symbol_hook (bfd *abfd,
4693 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4694 Elf_Internal_Sym *sym,
4695 const char **namep ATTRIBUTE_UNUSED,
4696 flagword *flagsp ATTRIBUTE_UNUSED,
4697 asection **secp,
4698 bfd_vma *valp)
4699 {
4700 asection *lcomm;
4701
4702 switch (sym->st_shndx)
4703 {
4704 case SHN_X86_64_LCOMMON:
4705 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4706 if (lcomm == NULL)
4707 {
4708 lcomm = bfd_make_section_with_flags (abfd,
4709 "LARGE_COMMON",
4710 (SEC_ALLOC
4711 | SEC_IS_COMMON
4712 | SEC_LINKER_CREATED));
4713 if (lcomm == NULL)
4714 return FALSE;
4715 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4716 }
4717 *secp = lcomm;
4718 *valp = sym->st_size;
4719 return TRUE;
4720 }
4721
4722 return TRUE;
4723 }
4724
4725
4726 /* Given a BFD section, try to locate the corresponding ELF section
4727 index. */
4728
4729 static bfd_boolean
4730 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4731 asection *sec, int *index_return)
4732 {
4733 if (sec == &_bfd_elf_large_com_section)
4734 {
4735 *index_return = SHN_X86_64_LCOMMON;
4736 return TRUE;
4737 }
4738 return FALSE;
4739 }
4740
4741 /* Process a symbol. */
4742
4743 static void
4744 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4745 asymbol *asym)
4746 {
4747 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4748
4749 switch (elfsym->internal_elf_sym.st_shndx)
4750 {
4751 case SHN_X86_64_LCOMMON:
4752 asym->section = &_bfd_elf_large_com_section;
4753 asym->value = elfsym->internal_elf_sym.st_size;
4754 /* Common symbol doesn't set BSF_GLOBAL. */
4755 asym->flags &= ~BSF_GLOBAL;
4756 break;
4757 }
4758 }
4759
4760 static bfd_boolean
4761 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4762 {
4763 return (sym->st_shndx == SHN_COMMON
4764 || sym->st_shndx == SHN_X86_64_LCOMMON);
4765 }
4766
4767 static unsigned int
4768 elf_x86_64_common_section_index (asection *sec)
4769 {
4770 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4771 return SHN_COMMON;
4772 else
4773 return SHN_X86_64_LCOMMON;
4774 }
4775
4776 static asection *
4777 elf_x86_64_common_section (asection *sec)
4778 {
4779 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4780 return bfd_com_section_ptr;
4781 else
4782 return &_bfd_elf_large_com_section;
4783 }
4784
4785 static bfd_boolean
4786 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4787 const Elf_Internal_Sym *sym,
4788 asection **psec,
4789 bfd_boolean newdef,
4790 bfd_boolean olddef,
4791 bfd *oldbfd,
4792 const asection *oldsec)
4793 {
4794 /* A normal common symbol and a large common symbol result in a
4795 normal common symbol. We turn the large common symbol into a
4796 normal one. */
4797 if (!olddef
4798 && h->root.type == bfd_link_hash_common
4799 && !newdef
4800 && bfd_is_com_section (*psec)
4801 && oldsec != *psec)
4802 {
4803 if (sym->st_shndx == SHN_COMMON
4804 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4805 {
4806 h->root.u.c.p->section
4807 = bfd_make_section_old_way (oldbfd, "COMMON");
4808 h->root.u.c.p->section->flags = SEC_ALLOC;
4809 }
4810 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4811 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4812 *psec = bfd_com_section_ptr;
4813 }
4814
4815 return TRUE;
4816 }
4817
4818 static int
4819 elf_x86_64_additional_program_headers (bfd *abfd,
4820 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4821 {
4822 asection *s;
4823 int count = 0;
4824
4825 /* Check to see if we need a large readonly segment. */
4826 s = bfd_get_section_by_name (abfd, ".lrodata");
4827 if (s && (s->flags & SEC_LOAD))
4828 count++;
4829
4830 /* Check to see if we need a large data segment. Since the .lbss
4831 section is placed right after the .bss section, there should be no
4832 need for a large data segment just because of .lbss. */
4833 s = bfd_get_section_by_name (abfd, ".ldata");
4834 if (s && (s->flags & SEC_LOAD))
4835 count++;
4836
4837 return count;
4838 }
4839
4840 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4841
4842 static bfd_boolean
4843 elf_x86_64_relocs_compatible (const bfd_target *input,
4844 const bfd_target *output)
4845 {
4846 return ((xvec_get_elf_backend_data (input)->s->elfclass
4847 == xvec_get_elf_backend_data (output)->s->elfclass)
4848 && _bfd_elf_relocs_compatible (input, output));
4849 }
4850
4851 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4852 with GNU properties if found. Otherwise, return NULL. */
4853
4854 static bfd *
4855 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4856 {
4857 struct elf_x86_init_table init_table;
4858
4859 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4860 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4861 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4862 != (int) R_X86_64_GNU_VTINHERIT)
4863 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4864 != (int) R_X86_64_GNU_VTENTRY))
4865 abort ();
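/* The checks above verify that R_X86_64_converted_reloc_bit lies outside
   the range of standard relocation numbers and that OR-ing it into
   R_X86_64_GNU_VTINHERIT/R_X86_64_GNU_VTENTRY leaves those values
   unchanged, so the bit can be used as an internal marker for converted
   GOTPCREL relocations.  */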
4866
4867 /* This is unused for x86-64. */
4868 init_table.plt0_pad_byte = 0x90;
4869
4870 if (get_elf_x86_backend_data (info->output_bfd)->target_os
4871 == is_normal)
4872 {
4873 if (info->bndplt)
4874 {
4875 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4876 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4877 }
4878 else
4879 {
4880 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4881 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4882 }
4883
4884 if (ABI_64_P (info->output_bfd))
4885 {
4886 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4887 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4888 }
4889 else
4890 {
4891 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4892 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4893 }
4894 }
4895 else
4896 {
4897 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4898 init_table.non_lazy_plt = NULL;
4899 init_table.lazy_ibt_plt = NULL;
4900 init_table.non_lazy_ibt_plt = NULL;
4901 }
4902
4903 if (ABI_64_P (info->output_bfd))
4904 {
4905 init_table.r_info = elf64_r_info;
4906 init_table.r_sym = elf64_r_sym;
4907 }
4908 else
4909 {
4910 init_table.r_info = elf32_r_info;
4911 init_table.r_sym = elf32_r_sym;
4912 }
4913
4914 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4915 }
4916
4917 static const struct bfd_elf_special_section
4918 elf_x86_64_special_sections[]=
4919 {
4920 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4921 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4922 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4923 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4924 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4925 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4926 { NULL, 0, 0, 0, 0 }
4927 };
4928
4929 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4930 #define TARGET_LITTLE_NAME "elf64-x86-64"
4931 #define ELF_ARCH bfd_arch_i386
4932 #define ELF_TARGET_ID X86_64_ELF_DATA
4933 #define ELF_MACHINE_CODE EM_X86_64
4934 #define ELF_MAXPAGESIZE 0x200000
4935 #define ELF_MINPAGESIZE 0x1000
4936 #define ELF_COMMONPAGESIZE 0x1000
4937
4938 #define elf_backend_can_gc_sections 1
4939 #define elf_backend_can_refcount 1
4940 #define elf_backend_want_got_plt 1
4941 #define elf_backend_plt_readonly 1
4942 #define elf_backend_want_plt_sym 0
4943 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
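/* The three reserved slots counted above form the conventional x86-64 GOT
   header: GOT[0] holds the address of _DYNAMIC, while GOT[1] and GOT[2] are
   filled in at run time by the dynamic linker (link map and lazy-resolver
   address) when PLT0 is used.  */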
4944 #define elf_backend_rela_normal 1
4945 #define elf_backend_plt_alignment 4
4946 #define elf_backend_extern_protected_data 1
4947 #define elf_backend_caches_rawsize 1
4948 #define elf_backend_dtrel_excludes_plt 1
4949 #define elf_backend_want_dynrelro 1
4950
4951 #define elf_info_to_howto elf_x86_64_info_to_howto
4952
4953 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4954 #define bfd_elf64_bfd_reloc_name_lookup \
4955 elf_x86_64_reloc_name_lookup
4956
4957 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4958 #define elf_backend_check_relocs elf_x86_64_check_relocs
4959 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4960 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4961 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4962 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4963 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4964 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4965 #ifdef CORE_HEADER
4966 #define elf_backend_write_core_note elf_x86_64_write_core_note
4967 #endif
4968 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4969 #define elf_backend_relocate_section elf_x86_64_relocate_section
4970 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4971 #define elf_backend_object_p elf64_x86_64_elf_object_p
4972 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4973
4974 #define elf_backend_section_from_shdr \
4975 elf_x86_64_section_from_shdr
4976
4977 #define elf_backend_section_from_bfd_section \
4978 elf_x86_64_elf_section_from_bfd_section
4979 #define elf_backend_add_symbol_hook \
4980 elf_x86_64_add_symbol_hook
4981 #define elf_backend_symbol_processing \
4982 elf_x86_64_symbol_processing
4983 #define elf_backend_common_section_index \
4984 elf_x86_64_common_section_index
4985 #define elf_backend_common_section \
4986 elf_x86_64_common_section
4987 #define elf_backend_common_definition \
4988 elf_x86_64_common_definition
4989 #define elf_backend_merge_symbol \
4990 elf_x86_64_merge_symbol
4991 #define elf_backend_special_sections \
4992 elf_x86_64_special_sections
4993 #define elf_backend_additional_program_headers \
4994 elf_x86_64_additional_program_headers
4995 #define elf_backend_setup_gnu_properties \
4996 elf_x86_64_link_setup_gnu_properties
4997 #define elf_backend_hide_symbol \
4998 _bfd_x86_elf_hide_symbol
4999
5000 #include "elf64-target.h"
5001
5002 /* CloudABI support. */
5003
5004 #undef TARGET_LITTLE_SYM
5005 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5006 #undef TARGET_LITTLE_NAME
5007 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5008
5009 #undef ELF_OSABI
5010 #define ELF_OSABI ELFOSABI_CLOUDABI
5011
5012 #undef elf64_bed
5013 #define elf64_bed elf64_x86_64_cloudabi_bed
5014
5015 #include "elf64-target.h"
5016
5017 /* FreeBSD support. */
5018
5019 #undef TARGET_LITTLE_SYM
5020 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5021 #undef TARGET_LITTLE_NAME
5022 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5023
5024 #undef ELF_OSABI
5025 #define ELF_OSABI ELFOSABI_FREEBSD
5026
5027 #undef elf64_bed
5028 #define elf64_bed elf64_x86_64_fbsd_bed
5029
5030 #include "elf64-target.h"
5031
5032 /* Solaris 2 support. */
5033
5034 #undef TARGET_LITTLE_SYM
5035 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5036 #undef TARGET_LITTLE_NAME
5037 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5038
5039 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5040 objects won't be recognized. */
5041 #undef ELF_OSABI
5042
5043 #undef elf64_bed
5044 #define elf64_bed elf64_x86_64_sol2_bed
5045
5046 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5047 boundary. */
5048 #undef elf_backend_static_tls_alignment
5049 #define elf_backend_static_tls_alignment 16
5050
5051 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5052
5053 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5054 File, p.63. */
5055 #undef elf_backend_want_plt_sym
5056 #define elf_backend_want_plt_sym 1
5057
5058 #undef elf_backend_strtab_flags
5059 #define elf_backend_strtab_flags SHF_STRINGS
5060
5061 static bfd_boolean
5062 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5063 bfd *obfd ATTRIBUTE_UNUSED,
5064 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5065 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5066 {
5067 /* PR 19938: FIXME: Need to add code for setting the sh_info
5068 and sh_link fields of Solaris specific section types. */
5069 return FALSE;
5070 }
5071
5072 #undef elf_backend_copy_special_section_fields
5073 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5074
5075 #include "elf64-target.h"
5076
5077 /* Native Client support. */
5078
5079 static bfd_boolean
5080 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5081 {
5082 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5083 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5084 return TRUE;
5085 }
5086
5087 #undef TARGET_LITTLE_SYM
5088 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5089 #undef TARGET_LITTLE_NAME
5090 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5091 #undef elf64_bed
5092 #define elf64_bed elf64_x86_64_nacl_bed
5093
5094 #undef ELF_MAXPAGESIZE
5095 #undef ELF_MINPAGESIZE
5096 #undef ELF_COMMONPAGESIZE
5097 #define ELF_MAXPAGESIZE 0x10000
5098 #define ELF_MINPAGESIZE 0x10000
5099 #define ELF_COMMONPAGESIZE 0x10000
5100
5101 /* Restore defaults. */
5102 #undef ELF_OSABI
5103 #undef elf_backend_static_tls_alignment
5104 #undef elf_backend_want_plt_sym
5105 #define elf_backend_want_plt_sym 0
5106 #undef elf_backend_strtab_flags
5107 #undef elf_backend_copy_special_section_fields
5108
5109 /* NaCl uses substantially different PLT entries for the same effects. */
5110
5111 #undef elf_backend_plt_alignment
5112 #define elf_backend_plt_alignment 5
5113 #define NACL_PLT_ENTRY_SIZE 64
5114 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
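/* In the PLT entries below, "and $-32, %r11d" (0xe0 is -32 encoded as the
   imm8 operand) clears the low five bits of the branch target so that every
   indirect jump lands on a 32-byte bundle boundary, as the NaCl sandbox
   requires.  */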
5115
5116 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5117 {
5118 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5119 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5120 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5121 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5122 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5123
5124 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5125 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5126
5127 /* 32 bytes of nop to pad out to the standard size. */
5128 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5129 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5130 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5131 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5132 0x66, /* excess data16 prefix */
5133 0x90 /* nop */
5134 };
5135
5136 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5137 {
5138 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5139 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5140 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5141 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5142
5143 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5144 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5145 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5146
5147 /* Lazy GOT entries point here (32-byte aligned). */
5148 0x68, /* pushq immediate */
5149 0, 0, 0, 0, /* replaced with index into relocation table. */
5150 0xe9, /* jmp relative */
5151 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5152
5153 /* 22 bytes of nop to pad out to the standard size. */
5154 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5155 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5156 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5157 };
5158
5159 /* .eh_frame covering the .plt section. */
5160
5161 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5162 {
5163 #if (PLT_CIE_LENGTH != 20 \
5164 || PLT_FDE_LENGTH != 36 \
5165 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5166 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5167 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5168 #endif
5169 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5170 0, 0, 0, 0, /* CIE ID */
5171 1, /* CIE version */
5172 'z', 'R', 0, /* Augmentation string */
5173 1, /* Code alignment factor */
5174 0x78, /* Data alignment factor */
5175 16, /* Return address column */
5176 1, /* Augmentation size */
5177 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5178 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5179 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5180 DW_CFA_nop, DW_CFA_nop,
5181
5182 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5183 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5184 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5185 0, 0, 0, 0, /* .plt size goes here */
5186 0, /* Augmentation size */
5187 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5188 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5189 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5190 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5191 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5192 13, /* Block length */
5193 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5194 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5195 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5196 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5197 DW_CFA_nop, DW_CFA_nop
5198 };
5199
5200 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5201 {
5202 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5203 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5204 elf_x86_64_nacl_plt_entry, /* plt_entry */
5205 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5206 2, /* plt0_got1_offset */
5207 9, /* plt0_got2_offset */
5208 13, /* plt0_got2_insn_end */
5209 3, /* plt_got_offset */
5210 33, /* plt_reloc_offset */
5211 38, /* plt_plt_offset */
5212 7, /* plt_got_insn_size */
5213 42, /* plt_plt_insn_end */
5214 32, /* plt_lazy_offset */
5215 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5216 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5217 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5218 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5219 };
5220
5221 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5222 {
5223 is_nacl /* os */
5224 };
5225
5226 #undef elf_backend_arch_data
5227 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5228
5229 #undef elf_backend_object_p
5230 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5231 #undef elf_backend_modify_segment_map
5232 #define elf_backend_modify_segment_map nacl_modify_segment_map
5233 #undef elf_backend_modify_program_headers
5234 #define elf_backend_modify_program_headers nacl_modify_program_headers
5235 #undef elf_backend_final_write_processing
5236 #define elf_backend_final_write_processing nacl_final_write_processing
5237
5238 #include "elf64-target.h"
5239
5240 /* Native Client x32 support. */
5241
5242 static bfd_boolean
5243 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5244 {
5245 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5246 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5247 return TRUE;
5248 }
5249
5250 #undef TARGET_LITTLE_SYM
5251 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5252 #undef TARGET_LITTLE_NAME
5253 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5254 #undef elf32_bed
5255 #define elf32_bed elf32_x86_64_nacl_bed
5256
5257 #define bfd_elf32_bfd_reloc_type_lookup \
5258 elf_x86_64_reloc_type_lookup
5259 #define bfd_elf32_bfd_reloc_name_lookup \
5260 elf_x86_64_reloc_name_lookup
5261 #define bfd_elf32_get_synthetic_symtab \
5262 elf_x86_64_get_synthetic_symtab
5263
5264 #undef elf_backend_object_p
5265 #define elf_backend_object_p \
5266 elf32_x86_64_nacl_elf_object_p
5267
5268 #undef elf_backend_bfd_from_remote_memory
5269 #define elf_backend_bfd_from_remote_memory \
5270 _bfd_elf32_bfd_from_remote_memory
5271
5272 #undef elf_backend_size_info
5273 #define elf_backend_size_info \
5274 _bfd_elf32_size_info
5275
5276 #include "elf32-target.h"
5277
5278 /* Restore defaults. */
5279 #undef elf_backend_object_p
5280 #define elf_backend_object_p elf64_x86_64_elf_object_p
5281 #undef elf_backend_bfd_from_remote_memory
5282 #undef elf_backend_size_info
5283 #undef elf_backend_modify_segment_map
5284 #undef elf_backend_modify_program_headers
5285 #undef elf_backend_final_write_processing
5286
5287 /* Intel L1OM support. */
5288
5289 static bfd_boolean
5290 elf64_l1om_elf_object_p (bfd *abfd)
5291 {
5292 /* Set the right machine number for an L1OM elf64 file. */
5293 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5294 return TRUE;
5295 }
5296
5297 #undef TARGET_LITTLE_SYM
5298 #define TARGET_LITTLE_SYM l1om_elf64_vec
5299 #undef TARGET_LITTLE_NAME
5300 #define TARGET_LITTLE_NAME "elf64-l1om"
5301 #undef ELF_ARCH
5302 #define ELF_ARCH bfd_arch_l1om
5303
5304 #undef ELF_MACHINE_CODE
5305 #define ELF_MACHINE_CODE EM_L1OM
5306
5307 #undef ELF_OSABI
5308
5309 #undef elf64_bed
5310 #define elf64_bed elf64_l1om_bed
5311
5312 #undef elf_backend_object_p
5313 #define elf_backend_object_p elf64_l1om_elf_object_p
5314
5315 /* Restore defaults. */
5316 #undef ELF_MAXPAGESIZE
5317 #undef ELF_MINPAGESIZE
5318 #undef ELF_COMMONPAGESIZE
5319 #define ELF_MAXPAGESIZE 0x200000
5320 #define ELF_MINPAGESIZE 0x1000
5321 #define ELF_COMMONPAGESIZE 0x1000
5322 #undef elf_backend_plt_alignment
5323 #define elf_backend_plt_alignment 4
5324 #undef elf_backend_arch_data
5325 #define elf_backend_arch_data &elf_x86_64_arch_bed
5326
5327 #include "elf64-target.h"
5328
5329 /* FreeBSD L1OM support. */
5330
5331 #undef TARGET_LITTLE_SYM
5332 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5333 #undef TARGET_LITTLE_NAME
5334 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5335
5336 #undef ELF_OSABI
5337 #define ELF_OSABI ELFOSABI_FREEBSD
5338
5339 #undef elf64_bed
5340 #define elf64_bed elf64_l1om_fbsd_bed
5341
5342 #include "elf64-target.h"
5343
5344 /* Intel K1OM support. */
5345
5346 static bfd_boolean
5347 elf64_k1om_elf_object_p (bfd *abfd)
5348 {
5349 /* Set the right machine number for a K1OM elf64 file. */
5350 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5351 return TRUE;
5352 }
5353
5354 #undef TARGET_LITTLE_SYM
5355 #define TARGET_LITTLE_SYM k1om_elf64_vec
5356 #undef TARGET_LITTLE_NAME
5357 #define TARGET_LITTLE_NAME "elf64-k1om"
5358 #undef ELF_ARCH
5359 #define ELF_ARCH bfd_arch_k1om
5360
5361 #undef ELF_MACHINE_CODE
5362 #define ELF_MACHINE_CODE EM_K1OM
5363
5364 #undef ELF_OSABI
5365
5366 #undef elf64_bed
5367 #define elf64_bed elf64_k1om_bed
5368
5369 #undef elf_backend_object_p
5370 #define elf_backend_object_p elf64_k1om_elf_object_p
5371
5372 #undef elf_backend_static_tls_alignment
5373
5374 #undef elf_backend_want_plt_sym
5375 #define elf_backend_want_plt_sym 0
5376
5377 #include "elf64-target.h"
5378
5379 /* FreeBSD K1OM support. */
5380
5381 #undef TARGET_LITTLE_SYM
5382 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5383 #undef TARGET_LITTLE_NAME
5384 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5385
5386 #undef ELF_OSABI
5387 #define ELF_OSABI ELFOSABI_FREEBSD
5388
5389 #undef elf64_bed
5390 #define elf64_bed elf64_k1om_fbsd_bed
5391
5392 #include "elf64-target.h"
5393
5394 /* 32bit x86-64 support. */
5395
5396 #undef TARGET_LITTLE_SYM
5397 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5398 #undef TARGET_LITTLE_NAME
5399 #define TARGET_LITTLE_NAME "elf32-x86-64"
5400 #undef elf32_bed
5401
5402 #undef ELF_ARCH
5403 #define ELF_ARCH bfd_arch_i386
5404
5405 #undef ELF_MACHINE_CODE
5406 #define ELF_MACHINE_CODE EM_X86_64
5407
5408 #undef ELF_OSABI
5409
5410 #undef elf_backend_object_p
5411 #define elf_backend_object_p \
5412 elf32_x86_64_elf_object_p
5413
5414 #undef elf_backend_bfd_from_remote_memory
5415 #define elf_backend_bfd_from_remote_memory \
5416 _bfd_elf32_bfd_from_remote_memory
5417
5418 #undef elf_backend_size_info
5419 #define elf_backend_size_info \
5420 _bfd_elf32_size_info
5421
5422 #include "elf32-target.h"