1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
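/* The bit is ORed into the relocation type by elf_x86_64_convert_load_reloc
   below and masked off again before the type is looked up, e.g. in
   elf_x86_64_info_to_howto and elf_x86_64_check_tls_transition. */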
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked onto
427 the end of the args in some implementations (at least one,
428 anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
546 works. */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
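/* At run time the dynamic linker fills GOT+8 with a pointer identifying
   this object (the link map) and GOT+16 with the address of its lazy
   resolver, so plt0 pushes the former and jumps to the latter. */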
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
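/* The three 32-bit placeholders above live at byte offsets 2, 7 and 12;
   elf_x86_64_lazy_plt below records them as plt_got_offset,
   plt_reloc_offset and plt_plt_offset. */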
566
567 /* The first entry in a lazy procedure linkage table with BND prefix
568 looks like this. */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588 the same as the lazy PLT with BND prefix so that bound registers are
589 preserved when control is passed to the dynamic linker. Subsequent
590 entries for an IBT-enabled lazy procedure linkage table look like
591 this. */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
613 /* Entries in the non-lazy procedure linkage table look like this. */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
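/* The DW_CFA_def_cfa_expression above computes the CFA as %rsp + 8, plus
   a further 8 once (%rip & 15) >= 11, i.e. once the pushq in a 16-byte
   lazy PLT entry has pushed the relocation index onto the stack. */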
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
825 {
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
843 };
844
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
846 {
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
854 };
855
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
857 {
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
878 {
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
886 };
887
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
889 {
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
928 };
929
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
931 {
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
939 };
940
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
942 {
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
953 {
954 is_normal /* os */
955 };
956
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
958
959 static bfd_boolean
960 elf64_x86_64_elf_object_p (bfd *abfd)
961 {
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
964 return TRUE;
965 }
966
967 static bfd_boolean
968 elf32_x86_64_elf_object_p (bfd *abfd)
969 {
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
972 return TRUE;
973 }
974
975 /* Return TRUE if the TLS access code sequence supports transition
976 from R_TYPE. */
977
978 static bfd_boolean
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
981 asection *sec,
982 bfd_byte *contents,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
985 unsigned int r_type,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
988 {
989 unsigned int val;
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
993 bfd_vma offset;
994 struct elf_x86_link_hash_table *htab;
995 bfd_byte *call;
996 bfd_boolean indirect_call;
997
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1000 switch (r_type)
1001 {
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1005 return FALSE;
1006
1007 if (r_type == R_X86_64_TLSGD)
1008 {
1009 /* Check transition from GD access model. For 64bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1012 or
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .byte 0x66; rex64
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
1018 can transition to a different access model. For 32bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1021 or
1022 leaq foo@tlsgd(%rip), %rdi
1023 .byte 0x66; rex64
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
1027 can transition to a different access model. For largepic,
1028 we also support:
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
1031 addq %r15, %rax
1032 call *%rax
1033 or
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
1036 addq %rbx, %rax
1037 call *%rax */
1038
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
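/* 0x66 is the padding data16 prefix, 0x48 the REX.W prefix, 0x8d the lea
   opcode and 0x3d the ModRM byte selecting %rdi with RIP-relative
   addressing. */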
1040
1041 if ((offset + 12) > sec->size)
1042 return FALSE;
1043
1044 call = contents + offset + 4;
1045 if (call[0] != 0x66
1046 || !((call[1] == 0x48
1047 && call[2] == 0xff
1048 && call[3] == 0x15)
1049 || (call[1] == 0x48
1050 && call[2] == 0x67
1051 && call[3] == 0xe8)
1052 || (call[1] == 0x66
1053 && call[2] == 0x48
1054 && call[3] == 0xe8)))
1055 {
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1058 || offset < 3
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1061 || call[11] != 0x01
1062 || call[13] != 0xff
1063 || call[14] != 0xd0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1066 return FALSE;
1067 largepic = TRUE;
1068 }
1069 else if (ABI_64_P (abfd))
1070 {
1071 if (offset < 4
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1073 return FALSE;
1074 }
1075 else
1076 {
1077 if (offset < 3
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1079 return FALSE;
1080 }
1081 indirect_call = call[2] == 0xff;
1082 }
1083 else
1084 {
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1088 or
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
1093 can transition to a different access model. For largepic
1094 we also support:
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
1097 addq %r15, %rax
1098 call *%rax
1099 or
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
1102 addq %rbx, %rax
1103 call *%rax */
1104
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1106
1107 if (offset < 3 || (offset + 9) > sec->size)
1108 return FALSE;
1109
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1111 return FALSE;
1112
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1117 {
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1121 || call[11] != 0x01
1122 || call[13] != 0xff
1123 || call[14] != 0xd0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1126 return FALSE;
1127 largepic = TRUE;
1128 }
1129 indirect_call = call[0] == 0xff;
1130 }
1131
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1134 return FALSE;
1135
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1137 if (h == NULL
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1139 return FALSE;
1140 else
1141 {
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1144 if (largepic)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1148 else
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1150 }
1151
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1156 */
1157
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1160 {
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1163 {
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1166 return FALSE;
1167 }
1168 }
1169 else
1170 {
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 if (offset < 2 || (offset + 3) > sec->size)
1175 return FALSE;
1176 }
1177
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1180 return FALSE;
1181
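/* Check the ModRM byte: mod must be 0 and r/m must be 5, i.e. RIP-relative
   addressing with a 32-bit displacement; the reg field (the destination
   register) is not constrained. */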
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1184
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1188
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1191 going to be rax. */
1192
1193 if (offset < 3 || (offset + 4) > sec->size)
1194 return FALSE;
1195
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1198 return FALSE;
1199
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1205
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1209 */
1210 if (offset + 2 <= sec->size)
1211 {
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1215 }
1216
1217 return FALSE;
1218
1219 default:
1220 abort ();
1221 }
1222 }
1223
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
1226
1227 static bfd_boolean
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1238 {
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1242
1243 /* Skip TLS transition for functions. */
1244 if (h != NULL
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1247 return TRUE;
1248
1249 switch (from_type)
1250 {
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1256 {
1257 if (h == NULL)
1258 to_type = R_X86_64_TPOFF32;
1259 else
1260 to_type = R_X86_64_GOTTPOFF;
1261 }
1262
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1266 {
1267 unsigned int new_to_type = to_type;
1268
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1271
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1275 {
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1278 }
1279
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1285 }
1286
1287 break;
1288
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1292 break;
1293
1294 default:
1295 return TRUE;
1296 }
1297
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1300 return TRUE;
1301
1302 /* Check if the transition can be performed. */
1303 if (check
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1307 {
1308 reloc_howto_type *from, *to;
1309 const char *name;
1310
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1313
1314 if (h)
1315 name = h->root.root.string;
1316 else
1317 {
1318 struct elf_x86_link_hash_table *htab;
1319
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1321 if (htab == NULL)
1322 name = "*unknown*";
1323 else
1324 {
1325 Elf_Internal_Sym *isym;
1326
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1328 abfd, r_symndx);
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1330 }
1331 }
1332
1333 _bfd_error_handler
1334 /* xgettext:c-format */
1335 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%A' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
1339 return FALSE;
1340 }
1341
1342 *r_type = to_type;
1343 return TRUE;
1344 }
1345
1346 /* Rename some of the generic section flags to better document how they
1347 are used here. */
1348 #define check_relocs_failed sec_flg0
1349
1350 static bfd_boolean
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1357 {
1358 const char *v = "";
1359 const char *und = "";
1360 const char *pic = "";
1361 const char *object;
1362
1363 const char *name;
1364 if (h)
1365 {
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1368 {
1369 case STV_HIDDEN:
1370 v = _("hidden symbol ");
1371 break;
1372 case STV_INTERNAL:
1373 v = _("internal symbol ");
1374 break;
1375 case STV_PROTECTED:
1376 v = _("protected symbol ");
1377 break;
1378 default:
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1381 else
1382 v = _("symbol ");
1383 pic = _("; recompile with -fPIC");
1384 break;
1385 }
1386
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
1389 }
1390 else
1391 {
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1394 }
1395
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1400 else
1401 object = _("a PDE object");
1402
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1407 object, pic);
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
1410 return FALSE;
1411 }
1412
1413 /* With the local symbol, foo, we convert
1414 mov foo@GOTPCREL(%rip), %reg
1415 to
1416 lea foo(%rip), %reg
1417 and convert
1418 call/jmp *foo@GOTPCREL(%rip)
1419 to
1420 nop call foo/jmp foo nop
1421 When PIC is false, convert
1422 test %reg, foo@GOTPCREL(%rip)
1423 to
1424 test $foo, %reg
1425 and convert
1426 binop foo@GOTPCREL(%rip), %reg
1427 to
1428 binop $foo, %reg
1429 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1430 instructions. */
1431
1432 static bfd_boolean
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1434 bfd_byte *contents,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1440 {
1441 struct elf_x86_link_hash_table *htab;
1442 bfd_boolean is_pic;
1443 bfd_boolean no_overflow;
1444 bfd_boolean relocx;
1445 bfd_boolean to_reloc_pc32;
1446 asection *tsec;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1449 unsigned int modrm;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
1453
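/* The opcode is read at roff - 2, the ModRM byte at roff - 1 and, for
   R_X86_64_REX_GOTPCRELX, the REX prefix at roff - 3, so there must be
   that many bytes in front of the relocation. */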
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1455 return TRUE;
1456
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1459 if (raddend != -4)
1460 return TRUE;
1461
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1464
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1467
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1470
1471 r_symndx = htab->r_sym (irel->r_info);
1472
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1474
1475 /* mov has long been converted to lea even for plain GOTPCREL; other opcodes need the GOTPCRELX relocations. */
1476 if (opcode != 0x8b)
1477 {
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
1481 if (!relocx)
1482 return TRUE;
1483 }
1484
1485 /* We convert only to R_X86_64_PC32:
1486 1. Branch.
1487 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1488 3. no_overflow is true.
1489 4. PIC.
1490 */
1491 to_reloc_pc32 = (opcode == 0xff
1492 || !relocx
1493 || no_overflow
1494 || is_pic);
1495
1496 /* Get the symbol referred to by the reloc. */
1497 if (h == NULL)
1498 {
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1501
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1504 return TRUE;
1505
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1512 else
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1514 }
1515 else
1516 {
1517 /* An undefined weak symbol is only bound locally in an executable
1518 and its reference is resolved as 0 without relocation
1519 overflow. We can only perform this optimization for
1520 GOTPCRELX relocations since we need to modify the REX byte.
1521 It is OK to convert mov with R_X86_64_GOTPCREL to
1522 R_X86_64_PC32. */
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1525
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
1530 && !eh->linker_def
1531 && local_ref))
1532 {
1533 if (opcode == 0xff)
1534 {
1535 /* Skip for branch instructions since R_X86_64_PC32
1536 may overflow. */
1537 if (no_overflow)
1538 return TRUE;
1539 }
1540 else if (relocx)
1541 {
1542 /* For non-branch instructions, we can convert to
1543 R_X86_64_32/R_X86_64_32S since we know if there
1544 is a REX byte. */
1545 to_reloc_pc32 = FALSE;
1546 }
1547
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
1551 return TRUE;
1552
1553 goto convert;
1554 }
1555 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1556 ld.so may use its link-time address. */
1557 else if (h->start_stop
1558 || eh->linker_def
1559 || ((h->def_regular
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1563 && local_ref))
1564 {
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
1568 on __start_SECNAME/__stop_SECNAME which mark section
1569 SECNAME. */
1570 if (h->start_stop
1571 || eh->linker_def
1572 || (h->def_regular
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1578 {
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1580 if (no_overflow)
1581 return TRUE;
1582 goto convert;
1583 }
1584 tsec = h->root.u.def.section;
1585 }
1586 else
1587 return TRUE;
1588 }
1589
1590 /* Don't convert a GOTPCREL relocation against a large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1593 return TRUE;
1594
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1596 if (no_overflow)
1597 return TRUE;
1598
1599 convert:
1600 if (opcode == 0xff)
1601 {
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1603 unsigned int nop;
1604 unsigned int disp;
1605 bfd_vma nop_offset;
1606
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1608 R_X86_64_PC32. */
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1610 if (modrm == 0x25)
1611 {
1612 /* Convert to "jmp foo nop". */
1613 modrm = 0xe9;
1614 nop = NOP_OPCODE;
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1619 }
1620 else
1621 {
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
1624
1625 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1626 is a nop prefix. */
1627 modrm = 0xe8;
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1631 {
1632 nop = 0x67;
1633 nop_offset = irel->r_offset - 2;
1634 }
1635 else
1636 {
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1639 {
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1644 }
1645 else
1646 nop_offset = irel->r_offset - 2;
1647 }
1648 }
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1652 }
1653 else
1654 {
1655 unsigned int rex;
1656 unsigned int rex_mask = REX_R;
1657
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1660 else
1661 rex = 0;
1662
1663 if (opcode == 0x8b)
1664 {
1665 if (to_reloc_pc32)
1666 {
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1669 opcode = 0x8d;
1670 r_type = R_X86_64_PC32;
1671 }
1672 else
1673 {
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
1676 opcode = 0xc7;
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1681 {
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1685 }
1686 else
1687 {
1688 /* If the REX_W bit in REX byte isn't needed,
1689 use R_X86_64_32 and clear the W bit to avoid

1690 sign-extending imm32 to imm64. */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1693 rex_mask |= REX_W;
1694 goto rewrite_modrm_rex;
1695 }
1696 }
1697 }
1698 else
1699 {
1700 /* R_X86_64_PC32 isn't supported. */
1701 if (to_reloc_pc32)
1702 return TRUE;
1703
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1705 if (opcode == 0x85)
1706 {
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1710 opcode = 0xf7;
1711 }
1712 else
1713 {
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1717 opcode = 0x81;
1718 }
1719
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1723
1724 rewrite_modrm_rex:
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1726
1727 if (rex)
1728 {
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1732 }
1733
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1735 irel->r_addend = 0;
1736 }
1737
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
1739 }
1740
1741 *r_type_p = r_type;
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
1744
1745 *converted = TRUE;
1746
1747 return TRUE;
1748 }
1749
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1753
1754 static bfd_boolean
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1756 asection *sec,
1757 const Elf_Internal_Rela *relocs)
1758 {
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1764 asection *sreloc;
1765 bfd_byte *contents;
1766 bfd_boolean converted;
1767
1768 if (bfd_link_relocatable (info))
1769 return TRUE;
1770
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1778 return TRUE;
1779
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1781 if (htab == NULL)
1782 {
1783 sec->check_relocs_failed = 1;
1784 return FALSE;
1785 }
1786
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1788
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1793 {
1794 sec->check_relocs_failed = 1;
1795 return FALSE;
1796 }
1797
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1800
1801 converted = FALSE;
1802
1803 sreloc = NULL;
1804
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1807 {
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1813 const char *name;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1816
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1819
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1821 {
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%B: bad symbol index: %d"),
1824 abfd, r_symndx);
1825 goto error_return;
1826 }
1827
1828 if (r_symndx < symtab_hdr->sh_info)
1829 {
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1832 abfd, r_symndx);
1833 if (isym == NULL)
1834 goto error_return;
1835
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1838 {
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1840 TRUE);
1841 if (h == NULL)
1842 goto error_return;
1843
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1846 isym, NULL);
1847 h->type = STT_GNU_IFUNC;
1848 h->def_regular = 1;
1849 h->ref_regular = 1;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
1852 }
1853 else
1854 h = NULL;
1855 }
1856 else
1857 {
1858 isym = NULL;
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1863 }
1864
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1867 switch (r_type)
1868 {
1869 default:
1870 break;
1871
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1874 case R_X86_64_PC64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1881 {
1882 if (h)
1883 name = h->root.root.string;
1884 else
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1886 NULL);
1887 _bfd_error_handler
1888 /* xgettext:c-format */
1889 (_("%B: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1893 goto error_return;
1894 }
1895 break;
1896 }
1897
1898 if (h != NULL)
1899 {
1900 /* It is referenced by a non-shared object. */
1901 h->ref_regular = 1;
1902
1903 if (h->type == STT_GNU_IFUNC)
1904 elf_tdata (info->output_bfd)->has_gnu_symbols
1905 |= elf_gnu_symbol_ifunc;
1906 }
1907
1908 converted_reloc = FALSE;
1909 if ((r_type == R_X86_64_GOTPCREL
1910 || r_type == R_X86_64_GOTPCRELX
1911 || r_type == R_X86_64_REX_GOTPCRELX)
1912 && (h == NULL || h->type != STT_GNU_IFUNC))
1913 {
1914 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1915 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1916 irel, h, &converted_reloc,
1917 info))
1918 goto error_return;
1919
1920 if (converted_reloc)
1921 converted = TRUE;
1922 }
1923
1924 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1925 symtab_hdr, sym_hashes,
1926 &r_type, GOT_UNKNOWN,
1927 rel, rel_end, h, r_symndx, FALSE))
1928 goto error_return;
1929
1930 eh = (struct elf_x86_link_hash_entry *) h;
1931 switch (r_type)
1932 {
1933 case R_X86_64_TLSLD:
1934 htab->tls_ld_or_ldm_got.refcount = 1;
1935 goto create_got;
1936
1937 case R_X86_64_TPOFF32:
1938 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1939 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1940 &x86_64_elf_howto_table[r_type]);
1941 if (eh != NULL)
1942 eh->zero_undefweak &= 0x2;
1943 break;
1944
1945 case R_X86_64_GOTTPOFF:
1946 if (!bfd_link_executable (info))
1947 info->flags |= DF_STATIC_TLS;
1948 /* Fall through */
1949
1950 case R_X86_64_GOT32:
1951 case R_X86_64_GOTPCREL:
1952 case R_X86_64_GOTPCRELX:
1953 case R_X86_64_REX_GOTPCRELX:
1954 case R_X86_64_TLSGD:
1955 case R_X86_64_GOT64:
1956 case R_X86_64_GOTPCREL64:
1957 case R_X86_64_GOTPLT64:
1958 case R_X86_64_GOTPC32_TLSDESC:
1959 case R_X86_64_TLSDESC_CALL:
1960 /* This symbol requires a global offset table entry. */
1961 {
1962 int tls_type, old_tls_type;
1963
1964 switch (r_type)
1965 {
1966 default: tls_type = GOT_NORMAL; break;
1967 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1968 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1969 case R_X86_64_GOTPC32_TLSDESC:
1970 case R_X86_64_TLSDESC_CALL:
1971 tls_type = GOT_TLS_GDESC; break;
1972 }
1973
1974 if (h != NULL)
1975 {
1976 h->got.refcount = 1;
1977 old_tls_type = eh->tls_type;
1978 }
1979 else
1980 {
1981 bfd_signed_vma *local_got_refcounts;
1982
1983 /* This is a global offset table entry for a local symbol. */
1984 local_got_refcounts = elf_local_got_refcounts (abfd);
1985 if (local_got_refcounts == NULL)
1986 {
1987 bfd_size_type size;
1988
1989 size = symtab_hdr->sh_info;
1990 size *= sizeof (bfd_signed_vma)
1991 + sizeof (bfd_vma) + sizeof (char);
1992 local_got_refcounts = ((bfd_signed_vma *)
1993 bfd_zalloc (abfd, size));
1994 if (local_got_refcounts == NULL)
1995 goto error_return;
1996 elf_local_got_refcounts (abfd) = local_got_refcounts;
1997 elf_x86_local_tlsdesc_gotent (abfd)
1998 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1999 elf_x86_local_got_tls_type (abfd)
2000 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2001 }
2002 local_got_refcounts[r_symndx] = 1;
2003 old_tls_type
2004 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2005 }
2006
2007 /* If a TLS symbol is accessed using IE at least once,
2008 there is no point in using a dynamic model for it. */
2009 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2010 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2011 || tls_type != GOT_TLS_IE))
2012 {
2013 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2014 tls_type = old_tls_type;
2015 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2016 && GOT_TLS_GD_ANY_P (tls_type))
2017 tls_type |= old_tls_type;
2018 else
2019 {
2020 if (h)
2021 name = h->root.root.string;
2022 else
2023 name = bfd_elf_sym_name (abfd, symtab_hdr,
2024 isym, NULL);
2025 _bfd_error_handler
2026 /* xgettext:c-format */
2027 (_("%B: '%s' accessed both as normal and"
2028 " thread local symbol"),
2029 abfd, name);
2030 bfd_set_error (bfd_error_bad_value);
2031 goto error_return;
2032 }
2033 }
2034
2035 if (old_tls_type != tls_type)
2036 {
2037 if (eh != NULL)
2038 eh->tls_type = tls_type;
2039 else
2040 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2041 }
2042 }
2043 /* Fall through */
2044
2045 case R_X86_64_GOTOFF64:
2046 case R_X86_64_GOTPC32:
2047 case R_X86_64_GOTPC64:
2048 create_got:
2049 if (eh != NULL)
2050 eh->zero_undefweak &= 0x2;
2051 break;
2052
2053 case R_X86_64_PLT32:
2054 case R_X86_64_PLT32_BND:
2055 /* This symbol requires a procedure linkage table entry. We
2056 actually build the entry in adjust_dynamic_symbol,
2057 because this might be a case of linking PIC code which is
2058 never referenced by a dynamic object, in which case we
2059 don't need to generate a procedure linkage table entry
2060 after all. */
2061
2062 /* If this is a local symbol, we resolve it directly without
2063 creating a procedure linkage table entry. */
2064 if (h == NULL)
2065 continue;
2066
2067 eh->zero_undefweak &= 0x2;
2068 h->needs_plt = 1;
2069 h->plt.refcount = 1;
2070 break;
2071
2072 case R_X86_64_PLTOFF64:
2073 /* This tries to form the 'address' of a function relative
2074 to the GOT. For global symbols we need a PLT entry. */
2075 if (h != NULL)
2076 {
2077 h->needs_plt = 1;
2078 h->plt.refcount = 1;
2079 }
2080 goto create_got;
2081
2082 case R_X86_64_SIZE32:
2083 case R_X86_64_SIZE64:
2084 size_reloc = TRUE;
2085 goto do_size;
2086
2087 case R_X86_64_32:
2088 if (!ABI_64_P (abfd))
2089 goto pointer;
2090 /* Fall through. */
2091 case R_X86_64_8:
2092 case R_X86_64_16:
2093 case R_X86_64_32S:
2094 /* Check relocation overflow as these relocs may lead to
2095 run-time relocation overflow. Don't error out for
2096 sections we don't care about, such as debug sections or
2097 when relocation overflow check is disabled. */
2098 if (!info->no_reloc_overflow_check
2099 && !converted_reloc
2100 && (bfd_link_pic (info)
2101 || (bfd_link_executable (info)
2102 && h != NULL
2103 && !h->def_regular
2104 && h->def_dynamic
2105 && (sec->flags & SEC_READONLY) == 0)))
2106 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2107 &x86_64_elf_howto_table[r_type]);
2108 /* Fall through. */
2109
2110 case R_X86_64_PC8:
2111 case R_X86_64_PC16:
2112 case R_X86_64_PC32:
2113 case R_X86_64_PC32_BND:
2114 case R_X86_64_PC64:
2115 case R_X86_64_64:
2116 pointer:
2117 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2118 eh->zero_undefweak |= 0x2;
2119 /* We are called after all symbols have been resolved. Only
2120 relocations against STT_GNU_IFUNC symbols must go through
2121 the PLT. */
2122 if (h != NULL
2123 && (bfd_link_executable (info)
2124 || h->type == STT_GNU_IFUNC))
2125 {
2126 bfd_boolean func_pointer_ref = FALSE;
2127
2128 if (r_type == R_X86_64_PC32)
2129 {
2130 /* Since something like ".long foo - ." may be used
2131 as a pointer, make sure that the PLT is used if foo is
2132 a function defined in a shared library. */
2133 if ((sec->flags & SEC_CODE) == 0)
2134 h->pointer_equality_needed = 1;
2135 }
2136 else if (r_type != R_X86_64_PC32_BND
2137 && r_type != R_X86_64_PC64)
2138 {
2139 h->pointer_equality_needed = 1;
2140 /* At run-time, R_X86_64_64 can be resolved for both
2141 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2142 can only be resolved for x32. */
2143 if ((sec->flags & SEC_READONLY) == 0
2144 && (r_type == R_X86_64_64
2145 || (!ABI_64_P (abfd)
2146 && (r_type == R_X86_64_32
2147 || r_type == R_X86_64_32S))))
2148 func_pointer_ref = TRUE;
2149 }
2150
2151 if (!func_pointer_ref)
2152 {
2153 /* If this reloc is in a read-only section, we might
2154 need a copy reloc. We can't check reliably at this
2155 stage whether the section is read-only, as input
2156 sections have not yet been mapped to output sections.
2157 Tentatively set the flag for now, and correct in
2158 adjust_dynamic_symbol. */
2159 h->non_got_ref = 1;
2160
2161 /* We may need a .plt entry if the symbol is a function
2162 defined in a shared lib or is a function referenced
2163 from the code or read-only section. */
2164 if (!h->def_regular
2165 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2166 h->plt.refcount = 1;
2167 }
2168 }
2169
2170 size_reloc = FALSE;
2171 do_size:
2172 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2173 htab->pointer_r_type))
2174 {
2175 struct elf_dyn_relocs *p;
2176 struct elf_dyn_relocs **head;
2177
2178 /* We must copy these reloc types into the output file.
2179 Create a reloc section in dynobj and make room for
2180 this reloc. */
2181 if (sreloc == NULL)
2182 {
2183 sreloc = _bfd_elf_make_dynamic_reloc_section
2184 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2185 abfd, /*rela?*/ TRUE);
2186
2187 if (sreloc == NULL)
2188 goto error_return;
2189 }
2190
2191 /* If this is a global symbol, we count the number of
2192 relocations we need for this symbol. */
2193 if (h != NULL)
2194 head = &eh->dyn_relocs;
2195 else
2196 {
2197 /* Track dynamic relocs needed for local syms too.
2198 We really need local syms available to do this
2199 easily. Oh well. */
2200 asection *s;
2201 void **vpp;
2202
2203 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2204 abfd, r_symndx);
2205 if (isym == NULL)
2206 goto error_return;
2207
2208 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2209 if (s == NULL)
2210 s = sec;
2211
2212 /* Beware of type punned pointers vs strict aliasing
2213 rules. */
2214 vpp = &(elf_section_data (s)->local_dynrel);
2215 head = (struct elf_dyn_relocs **)vpp;
2216 }
2217
2218 p = *head;
2219 if (p == NULL || p->sec != sec)
2220 {
2221 bfd_size_type amt = sizeof *p;
2222
2223 p = ((struct elf_dyn_relocs *)
2224 bfd_alloc (htab->elf.dynobj, amt));
2225 if (p == NULL)
2226 goto error_return;
2227 p->next = *head;
2228 *head = p;
2229 p->sec = sec;
2230 p->count = 0;
2231 p->pc_count = 0;
2232 }
2233
2234 p->count += 1;
2235 /* Count size relocation as PC-relative relocation. */
2236 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2237 p->pc_count += 1;
2238 }
2239 break;
2240
2241 /* This relocation describes the C++ object vtable hierarchy.
2242 Reconstruct it for later use during GC. */
2243 case R_X86_64_GNU_VTINHERIT:
2244 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2245 goto error_return;
2246 break;
2247
2248 /* This relocation describes which C++ vtable entries are actually
2249 used. Record for later use during GC. */
2250 case R_X86_64_GNU_VTENTRY:
2251 BFD_ASSERT (h != NULL);
2252 if (h != NULL
2253 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2254 goto error_return;
2255 break;
2256
2257 default:
2258 break;
2259 }
2260 }
2261
2262 if (elf_section_data (sec)->this_hdr.contents != contents)
2263 {
2264 if (!converted && !info->keep_memory)
2265 free (contents);
2266 else
2267 {
2268 /* Cache the section contents for elf_link_input_bfd if any
2269 load is converted or --no-keep-memory isn't used. */
2270 elf_section_data (sec)->this_hdr.contents = contents;
2271 }
2272 }
2273
2274 /* Cache relocations if any load is converted. */
2275 if (elf_section_data (sec)->relocs != relocs && converted)
2276 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2277
2278 return TRUE;
2279
2280 error_return:
2281 if (elf_section_data (sec)->this_hdr.contents != contents)
2282 free (contents);
2283 sec->check_relocs_failed = 1;
2284 return FALSE;
2285 }
2286
2287 /* Return the relocation value for @tpoff relocation
2288 if STT_TLS virtual address is ADDRESS. */
2289
2290 static bfd_vma
2291 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2292 {
2293 struct elf_link_hash_table *htab = elf_hash_table (info);
2294 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2295 bfd_vma static_tls_size;
2296
2297 /* If tls_segment is NULL, we should have signalled an error already. */
2298 if (htab->tls_sec == NULL)
2299 return 0;
2300
2301 /* Consider special static TLS alignment requirements. */
2302 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2303 return address - static_tls_size - htab->tls_sec->vma;
2304 }
2305
2306 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
2307 branch? */
2308
2309 static bfd_boolean
2310 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2311 {
2312 /* Opcode Instruction
2313 0xe8 call
2314 0xe9 jump
2315 0x0f 0x8x conditional jump */
2316 return ((offset > 0
2317 && (contents [offset - 1] == 0xe8
2318 || contents [offset - 1] == 0xe9))
2319 || (offset > 1
2320 && contents [offset - 2] == 0x0f
2321 && (contents [offset - 1] & 0xf0) == 0x80));
2322 }
2323
2324 /* Relocate an x86_64 ELF section. */
2325
2326 static bfd_boolean
2327 elf_x86_64_relocate_section (bfd *output_bfd,
2328 struct bfd_link_info *info,
2329 bfd *input_bfd,
2330 asection *input_section,
2331 bfd_byte *contents,
2332 Elf_Internal_Rela *relocs,
2333 Elf_Internal_Sym *local_syms,
2334 asection **local_sections)
2335 {
2336 struct elf_x86_link_hash_table *htab;
2337 Elf_Internal_Shdr *symtab_hdr;
2338 struct elf_link_hash_entry **sym_hashes;
2339 bfd_vma *local_got_offsets;
2340 bfd_vma *local_tlsdesc_gotents;
2341 Elf_Internal_Rela *rel;
2342 Elf_Internal_Rela *wrel;
2343 Elf_Internal_Rela *relend;
2344 unsigned int plt_entry_size;
2345
2346 /* Skip if check_relocs failed. */
2347 if (input_section->check_relocs_failed)
2348 return FALSE;
2349
2350 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2351 if (htab == NULL)
2352 return FALSE;
2353
2354 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2355
2356 plt_entry_size = htab->plt.plt_entry_size;
2357 symtab_hdr = &elf_symtab_hdr (input_bfd);
2358 sym_hashes = elf_sym_hashes (input_bfd);
2359 local_got_offsets = elf_local_got_offsets (input_bfd);
2360 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2361
2362 _bfd_x86_elf_set_tls_module_base (info);
2363
2364 rel = wrel = relocs;
2365 relend = relocs + input_section->reloc_count;
2366 for (; rel < relend; wrel++, rel++)
2367 {
2368 unsigned int r_type, r_type_tls;
2369 reloc_howto_type *howto;
2370 unsigned long r_symndx;
2371 struct elf_link_hash_entry *h;
2372 struct elf_x86_link_hash_entry *eh;
2373 Elf_Internal_Sym *sym;
2374 asection *sec;
2375 bfd_vma off, offplt, plt_offset;
2376 bfd_vma relocation;
2377 bfd_boolean unresolved_reloc;
2378 bfd_reloc_status_type r;
2379 int tls_type;
2380 asection *base_got, *resolved_plt;
2381 bfd_vma st_size;
2382 bfd_boolean resolved_to_zero;
2383 bfd_boolean relative_reloc;
2384 bfd_boolean converted_reloc;
2385 bfd_boolean need_copy_reloc_in_pie;
2386
2387 r_type = ELF32_R_TYPE (rel->r_info);
2388 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2389 || r_type == (int) R_X86_64_GNU_VTENTRY)
2390 {
2391 if (wrel != rel)
2392 *wrel = *rel;
2393 continue;
2394 }
2395
2396 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2397 r_type &= ~R_X86_64_converted_reloc_bit;
2398
2399 if (r_type >= (int) R_X86_64_standard)
2400 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2401
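/* R_X86_64_32 uses a different howto for x32 output: the alternate
   entry kept at the end of the howto table rather than the entry
   indexed by the relocation type.  */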
2402 if (r_type != (int) R_X86_64_32
2403 || ABI_64_P (output_bfd))
2404 howto = x86_64_elf_howto_table + r_type;
2405 else
2406 howto = (x86_64_elf_howto_table
2407 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2408 r_symndx = htab->r_sym (rel->r_info);
2409 h = NULL;
2410 sym = NULL;
2411 sec = NULL;
2412 unresolved_reloc = FALSE;
2413 if (r_symndx < symtab_hdr->sh_info)
2414 {
2415 sym = local_syms + r_symndx;
2416 sec = local_sections[r_symndx];
2417
2418 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2419 &sec, rel);
2420 st_size = sym->st_size;
2421
2422 /* Relocate against local STT_GNU_IFUNC symbol. */
2423 if (!bfd_link_relocatable (info)
2424 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2425 {
2426 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2427 rel, FALSE);
2428 if (h == NULL)
2429 abort ();
2430
2431 /* Set STT_GNU_IFUNC symbol value. */
2432 h->root.u.def.value = sym->st_value;
2433 h->root.u.def.section = sec;
2434 }
2435 }
2436 else
2437 {
2438 bfd_boolean warned ATTRIBUTE_UNUSED;
2439 bfd_boolean ignored ATTRIBUTE_UNUSED;
2440
2441 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2442 r_symndx, symtab_hdr, sym_hashes,
2443 h, sec, relocation,
2444 unresolved_reloc, warned, ignored);
2445 st_size = h->size;
2446 }
2447
2448 if (sec != NULL && discarded_section (sec))
2449 {
2450 _bfd_clear_contents (howto, input_bfd, input_section,
2451 contents + rel->r_offset);
2452 wrel->r_offset = rel->r_offset;
2453 wrel->r_info = 0;
2454 wrel->r_addend = 0;
2455
2456 /* For ld -r, remove relocations in debug sections against
2457 sections defined in discarded sections. Not done for
2458 eh_frame, as the editing code expects them to be present. */
2459 if (bfd_link_relocatable (info)
2460 && (input_section->flags & SEC_DEBUGGING))
2461 wrel--;
2462
2463 continue;
2464 }
2465
2466 if (bfd_link_relocatable (info))
2467 {
2468 if (wrel != rel)
2469 *wrel = *rel;
2470 continue;
2471 }
2472
2473 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2474 {
2475 if (r_type == R_X86_64_64)
2476 {
2477 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2478 zero-extend it to 64bit if addend is zero. */
2479 r_type = R_X86_64_32;
2480 memset (contents + rel->r_offset + 4, 0, 4);
2481 }
2482 else if (r_type == R_X86_64_SIZE64)
2483 {
2484 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2485 zero-extend it to 64bit if addend is zero. */
2486 r_type = R_X86_64_SIZE32;
2487 memset (contents + rel->r_offset + 4, 0, 4);
2488 }
2489 }
2490
2491 eh = (struct elf_x86_link_hash_entry *) h;
2492
2493 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2494 it here if it is defined in a non-shared object. */
2495 if (h != NULL
2496 && h->type == STT_GNU_IFUNC
2497 && h->def_regular)
2498 {
2499 bfd_vma plt_index;
2500 const char *name;
2501
2502 if ((input_section->flags & SEC_ALLOC) == 0)
2503 {
2504 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2505 sections because such sections are not SEC_ALLOC and
2506 thus ld.so will not process them. */
2507 if ((input_section->flags & SEC_DEBUGGING) != 0)
2508 continue;
2509 abort ();
2510 }
2511
2512 switch (r_type)
2513 {
2514 default:
2515 break;
2516
2517 case R_X86_64_GOTPCREL:
2518 case R_X86_64_GOTPCRELX:
2519 case R_X86_64_REX_GOTPCRELX:
2520 case R_X86_64_GOTPCREL64:
2521 base_got = htab->elf.sgot;
2522 off = h->got.offset;
2523
2524 if (base_got == NULL)
2525 abort ();
2526
2527 if (off == (bfd_vma) -1)
2528 {
2529 /* We can't use h->got.offset here to save state, or
2530 even just remember the offset, as finish_dynamic_symbol
2531 would use that as offset into .got. */
2532
2533 if (h->plt.offset == (bfd_vma) -1)
2534 abort ();
2535
2536 if (htab->elf.splt != NULL)
2537 {
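/* The first three .got.plt slots are reserved for the dynamic
   linker (the address of _DYNAMIC, the link map and the lazy
   resolver entry), so PLT slot N maps to .got.plt slot N + 3.  */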
2538 plt_index = (h->plt.offset / plt_entry_size
2539 - htab->plt.has_plt0);
2540 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2541 base_got = htab->elf.sgotplt;
2542 }
2543 else
2544 {
2545 plt_index = h->plt.offset / plt_entry_size;
2546 off = plt_index * GOT_ENTRY_SIZE;
2547 base_got = htab->elf.igotplt;
2548 }
2549
2550 if (h->dynindx == -1
2551 || h->forced_local
2552 || info->symbolic)
2553 {
2554 /* This references the local definition. We must
2555 initialize this entry in the global offset table.
2556 Since the offset must always be a multiple of 8,
2557 we use the least significant bit to record
2558 whether we have initialized it already.
2559
2560 When doing a dynamic link, we create a .rela.got
2561 relocation entry to initialize the value. This
2562 is done in the finish_dynamic_symbol routine. */
2563 if ((off & 1) != 0)
2564 off &= ~1;
2565 else
2566 {
2567 bfd_put_64 (output_bfd, relocation,
2568 base_got->contents + off);
2569 /* Note that this is harmless for the GOTPLT64
2570 case, as -1 | 1 still is -1. */
2571 h->got.offset |= 1;
2572 }
2573 }
2574 }
2575
2576 relocation = (base_got->output_section->vma
2577 + base_got->output_offset + off);
2578
2579 goto do_relocation;
2580 }
2581
2582 if (h->plt.offset == (bfd_vma) -1)
2583 {
2584 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2585 if (r_type == htab->pointer_r_type
2586 && (input_section->flags & SEC_CODE) == 0)
2587 goto do_ifunc_pointer;
2588 goto bad_ifunc_reloc;
2589 }
2590
2591 /* STT_GNU_IFUNC symbol must go through PLT. */
2592 if (htab->elf.splt != NULL)
2593 {
2594 if (htab->plt_second != NULL)
2595 {
2596 resolved_plt = htab->plt_second;
2597 plt_offset = eh->plt_second.offset;
2598 }
2599 else
2600 {
2601 resolved_plt = htab->elf.splt;
2602 plt_offset = h->plt.offset;
2603 }
2604 }
2605 else
2606 {
2607 resolved_plt = htab->elf.iplt;
2608 plt_offset = h->plt.offset;
2609 }
2610
2611 relocation = (resolved_plt->output_section->vma
2612 + resolved_plt->output_offset + plt_offset);
2613
2614 switch (r_type)
2615 {
2616 default:
2617 bad_ifunc_reloc:
2618 if (h->root.root.string)
2619 name = h->root.root.string;
2620 else
2621 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2622 NULL);
2623 _bfd_error_handler
2624 /* xgettext:c-format */
2625 (_("%B: relocation %s against STT_GNU_IFUNC "
2626 "symbol `%s' isn't supported"), input_bfd,
2627 howto->name, name);
2628 bfd_set_error (bfd_error_bad_value);
2629 return FALSE;
2630
2631 case R_X86_64_32S:
2632 if (bfd_link_pic (info))
2633 abort ();
2634 goto do_relocation;
2635
2636 case R_X86_64_32:
2637 if (ABI_64_P (output_bfd))
2638 goto do_relocation;
2639 /* FALLTHROUGH */
2640 case R_X86_64_64:
2641 do_ifunc_pointer:
2642 if (rel->r_addend != 0)
2643 {
2644 if (h->root.root.string)
2645 name = h->root.root.string;
2646 else
2647 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2648 sym, NULL);
2649 _bfd_error_handler
2650 /* xgettext:c-format */
2651 (_("%B: relocation %s against STT_GNU_IFUNC "
2652 "symbol `%s' has non-zero addend: %Ld"),
2653 input_bfd, howto->name, name, rel->r_addend);
2654 bfd_set_error (bfd_error_bad_value);
2655 return FALSE;
2656 }
2657
2658 /* Generate a dynamic relocation only when there is a
2659 non-GOT reference in a shared object or there is no
2660 PLT. */
2661 if ((bfd_link_pic (info) && h->non_got_ref)
2662 || h->plt.offset == (bfd_vma) -1)
2663 {
2664 Elf_Internal_Rela outrel;
2665 asection *sreloc;
2666
2667 /* Need a dynamic relocation to get the real function
2668 address. */
2669 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2670 info,
2671 input_section,
2672 rel->r_offset);
2673 if (outrel.r_offset == (bfd_vma) -1
2674 || outrel.r_offset == (bfd_vma) -2)
2675 abort ();
2676
2677 outrel.r_offset += (input_section->output_section->vma
2678 + input_section->output_offset);
2679
2680 if (POINTER_LOCAL_IFUNC_P (info, h))
2681 {
2682 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2683 h->root.root.string,
2684 h->root.u.def.section->owner);
2685
2686 /* This symbol is resolved locally. */
2687 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2688 outrel.r_addend = (h->root.u.def.value
2689 + h->root.u.def.section->output_section->vma
2690 + h->root.u.def.section->output_offset);
2691 }
2692 else
2693 {
2694 outrel.r_info = htab->r_info (h->dynindx, r_type);
2695 outrel.r_addend = 0;
2696 }
2697
2698 /* Dynamic relocations are stored in
2699 1. .rela.ifunc section in PIC object.
2700 2. .rela.got section in dynamic executable.
2701 3. .rela.iplt section in static executable. */
2702 if (bfd_link_pic (info))
2703 sreloc = htab->elf.irelifunc;
2704 else if (htab->elf.splt != NULL)
2705 sreloc = htab->elf.srelgot;
2706 else
2707 sreloc = htab->elf.irelplt;
2708 elf_append_rela (output_bfd, sreloc, &outrel);
2709
2710 /* If this reloc is against an external symbol, we
2711 do not want to fiddle with the addend. Otherwise,
2712 we need to include the symbol value so that it
2713 becomes an addend for the dynamic reloc. For an
2714 internal symbol, we have updated addend. */
2715 continue;
2716 }
2717 /* FALLTHROUGH */
2718 case R_X86_64_PC32:
2719 case R_X86_64_PC32_BND:
2720 case R_X86_64_PC64:
2721 case R_X86_64_PLT32:
2722 case R_X86_64_PLT32_BND:
2723 goto do_relocation;
2724 }
2725 }
2726
2727 resolved_to_zero = (eh != NULL
2728 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2729
2730 /* When generating a shared object, the relocations handled here are
2731 copied into the output file to be resolved at run time. */
2732 switch (r_type)
2733 {
2734 case R_X86_64_GOT32:
2735 case R_X86_64_GOT64:
2736 /* Relocation is to the entry for this symbol in the global
2737 offset table. */
2738 case R_X86_64_GOTPCREL:
2739 case R_X86_64_GOTPCRELX:
2740 case R_X86_64_REX_GOTPCRELX:
2741 case R_X86_64_GOTPCREL64:
2742 /* Use global offset table entry as symbol value. */
2743 case R_X86_64_GOTPLT64:
2744 /* This is obsolete and treated the same as GOT64. */
2745 base_got = htab->elf.sgot;
2746
2747 if (htab->elf.sgot == NULL)
2748 abort ();
2749
2750 relative_reloc = FALSE;
2751 if (h != NULL)
2752 {
2753 off = h->got.offset;
2754 if (h->needs_plt
2755 && h->plt.offset != (bfd_vma)-1
2756 && off == (bfd_vma)-1)
2757 {
2758 /* We can't use h->got.offset here to save
2759 state, or even just remember the offset, as
2760 finish_dynamic_symbol would use that as offset into
2761 .got. */
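/* As in the IFUNC case above, skip the three .got.plt slots
   reserved for the dynamic linker when deriving the GOT slot from
   the PLT index.  */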
2762 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2763 - htab->plt.has_plt0);
2764 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2765 base_got = htab->elf.sgotplt;
2766 }
2767
2768 if (RESOLVED_LOCALLY_P (info, h, htab))
2769 {
2770 /* We must initialize this entry in the global offset
2771 table. Since the offset must always be a multiple
2772 of 8, we use the least significant bit to record
2773 whether we have initialized it already.
2774
2775 When doing a dynamic link, we create a .rela.got
2776 relocation entry to initialize the value. This is
2777 done in the finish_dynamic_symbol routine. */
2778 if ((off & 1) != 0)
2779 off &= ~1;
2780 else
2781 {
2782 bfd_put_64 (output_bfd, relocation,
2783 base_got->contents + off);
2784 /* Note that this is harmless for the GOTPLT64 case,
2785 as -1 | 1 still is -1. */
2786 h->got.offset |= 1;
2787
2788 if (GENERATE_RELATIVE_RELOC_P (info, h))
2789 {
2790 /* If this symbol isn't dynamic in PIC,
2791 generate R_X86_64_RELATIVE here. */
2792 eh->no_finish_dynamic_symbol = 1;
2793 relative_reloc = TRUE;
2794 }
2795 }
2796 }
2797 else
2798 unresolved_reloc = FALSE;
2799 }
2800 else
2801 {
2802 if (local_got_offsets == NULL)
2803 abort ();
2804
2805 off = local_got_offsets[r_symndx];
2806
2807 /* The offset must always be a multiple of 8. We use
2808 the least significant bit to record whether we have
2809 already generated the necessary reloc. */
2810 if ((off & 1) != 0)
2811 off &= ~1;
2812 else
2813 {
2814 bfd_put_64 (output_bfd, relocation,
2815 base_got->contents + off);
2816 local_got_offsets[r_symndx] |= 1;
2817
2818 if (bfd_link_pic (info))
2819 relative_reloc = TRUE;
2820 }
2821 }
2822
2823 if (relative_reloc)
2824 {
2825 asection *s;
2826 Elf_Internal_Rela outrel;
2827
2828 /* We need to generate a R_X86_64_RELATIVE reloc
2829 for the dynamic linker. */
2830 s = htab->elf.srelgot;
2831 if (s == NULL)
2832 abort ();
2833
2834 outrel.r_offset = (base_got->output_section->vma
2835 + base_got->output_offset
2836 + off);
2837 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2838 outrel.r_addend = relocation;
2839 elf_append_rela (output_bfd, s, &outrel);
2840 }
2841
2842 if (off >= (bfd_vma) -2)
2843 abort ();
2844
2845 relocation = base_got->output_section->vma
2846 + base_got->output_offset + off;
2847 if (r_type != R_X86_64_GOTPCREL
2848 && r_type != R_X86_64_GOTPCRELX
2849 && r_type != R_X86_64_REX_GOTPCRELX
2850 && r_type != R_X86_64_GOTPCREL64)
2851 relocation -= htab->elf.sgotplt->output_section->vma
2852 - htab->elf.sgotplt->output_offset;
2853
2854 break;
2855
2856 case R_X86_64_GOTOFF64:
2857 /* Relocation is relative to the start of the global offset
2858 table. */
2859
2860 /* Check to make sure it isn't a protected function or data
2861 symbol for a shared library since it may not be local when
2862 used as a function address or with a copy relocation. We also
2863 need to make sure that a symbol is referenced locally. */
2864 if (bfd_link_pic (info) && h)
2865 {
2866 if (!h->def_regular)
2867 {
2868 const char *v;
2869
2870 switch (ELF_ST_VISIBILITY (h->other))
2871 {
2872 case STV_HIDDEN:
2873 v = _("hidden symbol");
2874 break;
2875 case STV_INTERNAL:
2876 v = _("internal symbol");
2877 break;
2878 case STV_PROTECTED:
2879 v = _("protected symbol");
2880 break;
2881 default:
2882 v = _("symbol");
2883 break;
2884 }
2885
2886 _bfd_error_handler
2887 /* xgettext:c-format */
2888 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2889 " `%s' can not be used when making a shared object"),
2890 input_bfd, v, h->root.root.string);
2891 bfd_set_error (bfd_error_bad_value);
2892 return FALSE;
2893 }
2894 else if (!bfd_link_executable (info)
2895 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2896 && (h->type == STT_FUNC
2897 || h->type == STT_OBJECT)
2898 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2899 {
2900 _bfd_error_handler
2901 /* xgettext:c-format */
2902 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2903 " `%s' can not be used when making a shared object"),
2904 input_bfd,
2905 h->type == STT_FUNC ? "function" : "data",
2906 h->root.root.string);
2907 bfd_set_error (bfd_error_bad_value);
2908 return FALSE;
2909 }
2910 }
2911
2912 /* Note that sgot is not involved in this
2913 calculation. We always want the start of .got.plt. If we
2914 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2915 permitted by the ABI, we might have to change this
2916 calculation. */
2917 relocation -= htab->elf.sgotplt->output_section->vma
2918 + htab->elf.sgotplt->output_offset;
2919 break;
2920
2921 case R_X86_64_GOTPC32:
2922 case R_X86_64_GOTPC64:
2923 /* Use global offset table as symbol value. */
2924 relocation = htab->elf.sgotplt->output_section->vma
2925 + htab->elf.sgotplt->output_offset;
2926 unresolved_reloc = FALSE;
2927 break;
2928
2929 case R_X86_64_PLTOFF64:
2930 /* Relocation is PLT entry relative to GOT. For local
2931 symbols it's the symbol itself relative to GOT. */
2932 if (h != NULL
2933 /* See PLT32 handling. */
2934 && (h->plt.offset != (bfd_vma) -1
2935 || eh->plt_got.offset != (bfd_vma) -1)
2936 && htab->elf.splt != NULL)
2937 {
2938 if (eh->plt_got.offset != (bfd_vma) -1)
2939 {
2940 /* Use the GOT PLT. */
2941 resolved_plt = htab->plt_got;
2942 plt_offset = eh->plt_got.offset;
2943 }
2944 else if (htab->plt_second != NULL)
2945 {
2946 resolved_plt = htab->plt_second;
2947 plt_offset = eh->plt_second.offset;
2948 }
2949 else
2950 {
2951 resolved_plt = htab->elf.splt;
2952 plt_offset = h->plt.offset;
2953 }
2954
2955 relocation = (resolved_plt->output_section->vma
2956 + resolved_plt->output_offset
2957 + plt_offset);
2958 unresolved_reloc = FALSE;
2959 }
2960
2961 relocation -= htab->elf.sgotplt->output_section->vma
2962 + htab->elf.sgotplt->output_offset;
2963 break;
2964
2965 case R_X86_64_PLT32:
2966 case R_X86_64_PLT32_BND:
2967 /* Relocation is to the entry for this symbol in the
2968 procedure linkage table. */
2969
2970 /* Resolve a PLT32 reloc against a local symbol directly,
2971 without using the procedure linkage table. */
2972 if (h == NULL)
2973 break;
2974
2975 if ((h->plt.offset == (bfd_vma) -1
2976 && eh->plt_got.offset == (bfd_vma) -1)
2977 || htab->elf.splt == NULL)
2978 {
2979 /* We didn't make a PLT entry for this symbol. This
2980 happens when statically linking PIC code, or when
2981 using -Bsymbolic. */
2982 break;
2983 }
2984
2985 if (h->plt.offset != (bfd_vma) -1)
2986 {
2987 if (htab->plt_second != NULL)
2988 {
2989 resolved_plt = htab->plt_second;
2990 plt_offset = eh->plt_second.offset;
2991 }
2992 else
2993 {
2994 resolved_plt = htab->elf.splt;
2995 plt_offset = h->plt.offset;
2996 }
2997 }
2998 else
2999 {
3000 /* Use the GOT PLT. */
3001 resolved_plt = htab->plt_got;
3002 plt_offset = eh->plt_got.offset;
3003 }
3004
3005 relocation = (resolved_plt->output_section->vma
3006 + resolved_plt->output_offset
3007 + plt_offset);
3008 unresolved_reloc = FALSE;
3009 break;
3010
3011 case R_X86_64_SIZE32:
3012 case R_X86_64_SIZE64:
3013 /* Set to symbol size. */
3014 relocation = st_size;
3015 goto direct;
3016
3017 case R_X86_64_PC8:
3018 case R_X86_64_PC16:
3019 case R_X86_64_PC32:
3020 case R_X86_64_PC32_BND:
3021 /* Don't complain about -fPIC if the symbol is undefined when
3022 building an executable unless it is an unresolved weak symbol or
3023 -z nocopyreloc is used. */
3024 if ((input_section->flags & SEC_ALLOC) != 0
3025 && (input_section->flags & SEC_READONLY) != 0
3026 && h != NULL
3027 && ((bfd_link_executable (info)
3028 && ((h->root.type == bfd_link_hash_undefweak
3029 && !resolved_to_zero)
3030 || ((info->nocopyreloc
3031 || (eh->def_protected
3032 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3033 && h->def_dynamic
3034 && !(h->root.u.def.section->flags & SEC_CODE))))
3035 || bfd_link_dll (info)))
3036 {
3037 bfd_boolean fail = FALSE;
3038 bfd_boolean branch
3039 = ((r_type == R_X86_64_PC32
3040 || r_type == R_X86_64_PC32_BND)
3041 && is_32bit_relative_branch (contents, rel->r_offset));
3042
3043 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3044 {
3045 /* Symbol is referenced locally. Make sure it is
3046 defined locally, or that this is a branch. */
3047 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3048 && !branch);
3049 }
3050 else if (!(bfd_link_pie (info)
3051 && (h->needs_copy || eh->needs_copy)))
3052 {
3053 /* Symbol doesn't need copy reloc and isn't referenced
3054 locally. We only allow branch to symbol with
3055 non-default visibility. */
3056 fail = (!branch
3057 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3058 }
3059
3060 if (fail)
3061 return elf_x86_64_need_pic (info, input_bfd, input_section,
3062 h, NULL, NULL, howto);
3063 }
3064 /* Fall through. */
3065
3066 case R_X86_64_8:
3067 case R_X86_64_16:
3068 case R_X86_64_32:
3069 case R_X86_64_PC64:
3070 case R_X86_64_64:
3071 /* FIXME: The ABI says the linker should make sure the value is
3072 the same when it's zero-extended to 64 bits. */
3073
3074 direct:
3075 if ((input_section->flags & SEC_ALLOC) == 0)
3076 break;
3077
3078 need_copy_reloc_in_pie = (bfd_link_pie (info)
3079 && h != NULL
3080 && (h->needs_copy
3081 || eh->needs_copy
3082 || (h->root.type
3083 == bfd_link_hash_undefined))
3084 && (X86_PCREL_TYPE_P (r_type)
3085 || X86_SIZE_TYPE_P (r_type)));
3086
3087 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3088 need_copy_reloc_in_pie,
3089 resolved_to_zero, FALSE))
3090 {
3091 Elf_Internal_Rela outrel;
3092 bfd_boolean skip, relocate;
3093 asection *sreloc;
3094
3095 /* When generating a shared object, these relocations
3096 are copied into the output file to be resolved at run
3097 time. */
3098 skip = FALSE;
3099 relocate = FALSE;
3100
3101 outrel.r_offset =
3102 _bfd_elf_section_offset (output_bfd, info, input_section,
3103 rel->r_offset);
3104 if (outrel.r_offset == (bfd_vma) -1)
3105 skip = TRUE;
3106 else if (outrel.r_offset == (bfd_vma) -2)
3107 skip = TRUE, relocate = TRUE;
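/* An offset of -1 means the location was discarded: emit a no-op
   dynamic reloc and skip the static relocation as well.  -2 means
   no real dynamic reloc is wanted but the field must still be
   relocated in place.  */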
3108
3109 outrel.r_offset += (input_section->output_section->vma
3110 + input_section->output_offset);
3111
3112 if (skip)
3113 memset (&outrel, 0, sizeof outrel);
3114
3115 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3116 {
3117 outrel.r_info = htab->r_info (h->dynindx, r_type);
3118 outrel.r_addend = rel->r_addend;
3119 }
3120 else
3121 {
3122 /* This symbol is local, or marked to become local.
3123 When relocation overflow check is disabled, we
3124 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3125 if (r_type == htab->pointer_r_type
3126 || (r_type == R_X86_64_32
3127 && info->no_reloc_overflow_check))
3128 {
3129 relocate = TRUE;
3130 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3131 outrel.r_addend = relocation + rel->r_addend;
3132 }
3133 else if (r_type == R_X86_64_64
3134 && !ABI_64_P (output_bfd))
3135 {
3136 relocate = TRUE;
3137 outrel.r_info = htab->r_info (0,
3138 R_X86_64_RELATIVE64);
3139 outrel.r_addend = relocation + rel->r_addend;
3140 /* Check addend overflow. */
3141 if ((outrel.r_addend & 0x80000000)
3142 != (rel->r_addend & 0x80000000))
3143 {
3144 const char *name;
3145 int addend = rel->r_addend;
3146 if (h && h->root.root.string)
3147 name = h->root.root.string;
3148 else
3149 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3150 sym, NULL);
3151 _bfd_error_handler
3152 /* xgettext:c-format */
3153 (_("%B: addend %s%#x in relocation %s against "
3154 "symbol `%s' at %#Lx in section `%A' is "
3155 "out of range"),
3156 input_bfd, addend < 0 ? "-" : "", addend,
3157 howto->name, name, rel->r_offset, input_section);
3158 bfd_set_error (bfd_error_bad_value);
3159 return FALSE;
3160 }
3161 }
3162 else
3163 {
3164 long sindx;
3165
3166 if (bfd_is_abs_section (sec))
3167 sindx = 0;
3168 else if (sec == NULL || sec->owner == NULL)
3169 {
3170 bfd_set_error (bfd_error_bad_value);
3171 return FALSE;
3172 }
3173 else
3174 {
3175 asection *osec;
3176
3177 /* We are turning this relocation into one
3178 against a section symbol. It would be
3179 proper to subtract the symbol's value,
3180 osec->vma, from the emitted reloc addend,
3181 but ld.so expects buggy relocs. */
3182 osec = sec->output_section;
3183 sindx = elf_section_data (osec)->dynindx;
3184 if (sindx == 0)
3185 {
3186 asection *oi = htab->elf.text_index_section;
3187 sindx = elf_section_data (oi)->dynindx;
3188 }
3189 BFD_ASSERT (sindx != 0);
3190 }
3191
3192 outrel.r_info = htab->r_info (sindx, r_type);
3193 outrel.r_addend = relocation + rel->r_addend;
3194 }
3195 }
3196
3197 sreloc = elf_section_data (input_section)->sreloc;
3198
3199 if (sreloc == NULL || sreloc->contents == NULL)
3200 {
3201 r = bfd_reloc_notsupported;
3202 goto check_relocation_error;
3203 }
3204
3205 elf_append_rela (output_bfd, sreloc, &outrel);
3206
3207 /* If this reloc is against an external symbol, we do
3208 not want to fiddle with the addend. Otherwise, we
3209 need to include the symbol value so that it becomes
3210 an addend for the dynamic reloc. */
3211 if (! relocate)
3212 continue;
3213 }
3214
3215 break;
3216
3217 case R_X86_64_TLSGD:
3218 case R_X86_64_GOTPC32_TLSDESC:
3219 case R_X86_64_TLSDESC_CALL:
3220 case R_X86_64_GOTTPOFF:
3221 tls_type = GOT_UNKNOWN;
3222 if (h == NULL && local_got_offsets)
3223 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3224 else if (h != NULL)
3225 tls_type = elf_x86_hash_entry (h)->tls_type;
3226
3227 r_type_tls = r_type;
3228 if (! elf_x86_64_tls_transition (info, input_bfd,
3229 input_section, contents,
3230 symtab_hdr, sym_hashes,
3231 &r_type_tls, tls_type, rel,
3232 relend, h, r_symndx, TRUE))
3233 return FALSE;
3234
3235 if (r_type_tls == R_X86_64_TPOFF32)
3236 {
3237 bfd_vma roff = rel->r_offset;
3238
3239 BFD_ASSERT (! unresolved_reloc);
3240
3241 if (r_type == R_X86_64_TLSGD)
3242 {
3243 /* GD->LE transition. For 64bit, change
3244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3245 .word 0x6666; rex64; call __tls_get_addr@PLT
3246 or
3247 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3248 .byte 0x66; rex64
3249 call *__tls_get_addr@GOTPCREL(%rip)
3250 which may be converted to
3251 addr32 call __tls_get_addr
3252 into:
3253 movq %fs:0, %rax
3254 leaq foo@tpoff(%rax), %rax
3255 For 32bit, change
3256 leaq foo@tlsgd(%rip), %rdi
3257 .word 0x6666; rex64; call __tls_get_addr@PLT
3258 or
3259 leaq foo@tlsgd(%rip), %rdi
3260 .byte 0x66; rex64
3261 call *__tls_get_addr@GOTPCREL(%rip)
3262 which may be converted to
3263 addr32 call __tls_get_addr
3264 into:
3265 movl %fs:0, %eax
3266 leaq foo@tpoff(%rax), %rax
3267 For largepic, change:
3268 leaq foo@tlsgd(%rip), %rdi
3269 movabsq $__tls_get_addr@pltoff, %rax
3270 addq %r15, %rax
3271 call *%rax
3272 into:
3273 movq %fs:0, %rax
3274 leaq foo@tpoff(%rax), %rax
3275 nopw 0x0(%rax,%rax,1) */
3276 int largepic = 0;
3277 if (ABI_64_P (output_bfd))
3278 {
3279 if (contents[roff + 5] == 0xb8)
3280 {
3281 memcpy (contents + roff - 3,
3282 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3283 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3284 largepic = 1;
3285 }
3286 else
3287 memcpy (contents + roff - 4,
3288 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3289 16);
3290 }
3291 else
3292 memcpy (contents + roff - 3,
3293 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3294 15);
3295 bfd_put_32 (output_bfd,
3296 elf_x86_64_tpoff (info, relocation),
3297 contents + roff + 8 + largepic);
3298 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3299 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3300 rel++;
3301 wrel++;
3302 continue;
3303 }
3304 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3305 {
3306 /* GDesc -> LE transition.
3307 It's originally something like:
3308 leaq x@tlsdesc(%rip), %rax
3309
3310 Change it to:
3311 movl $x@tpoff, %rax. */
3312
3313 unsigned int val, type;
3314
3315 type = bfd_get_8 (input_bfd, contents + roff - 3);
3316 val = bfd_get_8 (input_bfd, contents + roff - 1);
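/* TYPE is the REX prefix and VAL the ModRM byte of the original
   leaq.  In the replacement mov $imm32, %reg (0xc7 /0) the
   destination register sits in the ModRM rm field rather than the
   reg field, so REX.R is carried over as REX.B and the register
   bits are shifted down from reg to rm.  */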
3317 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3318 contents + roff - 3);
3319 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3320 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3321 contents + roff - 1);
3322 bfd_put_32 (output_bfd,
3323 elf_x86_64_tpoff (info, relocation),
3324 contents + roff);
3325 continue;
3326 }
3327 else if (r_type == R_X86_64_TLSDESC_CALL)
3328 {
3329 /* GDesc -> LE transition.
3330 It's originally:
3331 call *(%rax)
3332 Turn it into:
3333 xchg %ax,%ax. */
3334 bfd_put_8 (output_bfd, 0x66, contents + roff);
3335 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3336 continue;
3337 }
3338 else if (r_type == R_X86_64_GOTTPOFF)
3339 {
3340 /* IE->LE transition:
3341 For 64bit, originally it can be one of:
3342 movq foo@gottpoff(%rip), %reg
3343 addq foo@gottpoff(%rip), %reg
3344 We change it into:
3345 movq $foo, %reg
3346 leaq foo(%reg), %reg
3347 addq $foo, %reg.
3348 For 32bit, originally it can be one of:
3349 movq foo@gottpoff(%rip), %reg
3350 addl foo@gottpoff(%rip), %reg
3351 We change it into:
3352 movq $foo, %reg
3353 leal foo(%reg), %reg
3354 addl $foo, %reg. */
3355
3356 unsigned int val, type, reg;
3357
3358 if (roff >= 3)
3359 val = bfd_get_8 (input_bfd, contents + roff - 3);
3360 else
3361 val = 0;
3362 type = bfd_get_8 (input_bfd, contents + roff - 2);
3363 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3364 reg >>= 3;
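/* With RIP-relative addressing mod is 0, so after the shift REG is
   just the low three bits of the destination register; the REX
   prefix read into VAL above supplies the extension for %r8-%r15.  */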
3365 if (type == 0x8b)
3366 {
3367 /* movq */
3368 if (val == 0x4c)
3369 bfd_put_8 (output_bfd, 0x49,
3370 contents + roff - 3);
3371 else if (!ABI_64_P (output_bfd) && val == 0x44)
3372 bfd_put_8 (output_bfd, 0x41,
3373 contents + roff - 3);
3374 bfd_put_8 (output_bfd, 0xc7,
3375 contents + roff - 2);
3376 bfd_put_8 (output_bfd, 0xc0 | reg,
3377 contents + roff - 1);
3378 }
3379 else if (reg == 4)
3380 {
3381 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3382 is special */
3383 if (val == 0x4c)
3384 bfd_put_8 (output_bfd, 0x49,
3385 contents + roff - 3);
3386 else if (!ABI_64_P (output_bfd) && val == 0x44)
3387 bfd_put_8 (output_bfd, 0x41,
3388 contents + roff - 3);
3389 bfd_put_8 (output_bfd, 0x81,
3390 contents + roff - 2);
3391 bfd_put_8 (output_bfd, 0xc0 | reg,
3392 contents + roff - 1);
3393 }
3394 else
3395 {
3396 /* addq/addl -> leaq/leal */
3397 if (val == 0x4c)
3398 bfd_put_8 (output_bfd, 0x4d,
3399 contents + roff - 3);
3400 else if (!ABI_64_P (output_bfd) && val == 0x44)
3401 bfd_put_8 (output_bfd, 0x45,
3402 contents + roff - 3);
3403 bfd_put_8 (output_bfd, 0x8d,
3404 contents + roff - 2);
3405 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3406 contents + roff - 1);
3407 }
3408 bfd_put_32 (output_bfd,
3409 elf_x86_64_tpoff (info, relocation),
3410 contents + roff);
3411 continue;
3412 }
3413 else
3414 BFD_ASSERT (FALSE);
3415 }
3416
3417 if (htab->elf.sgot == NULL)
3418 abort ();
3419
3420 if (h != NULL)
3421 {
3422 off = h->got.offset;
3423 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3424 }
3425 else
3426 {
3427 if (local_got_offsets == NULL)
3428 abort ();
3429
3430 off = local_got_offsets[r_symndx];
3431 offplt = local_tlsdesc_gotents[r_symndx];
3432 }
3433
3434 if ((off & 1) != 0)
3435 off &= ~1;
3436 else
3437 {
3438 Elf_Internal_Rela outrel;
3439 int dr_type, indx;
3440 asection *sreloc;
3441
3442 if (htab->elf.srelgot == NULL)
3443 abort ();
3444
3445 indx = h && h->dynindx != -1 ? h->dynindx : 0;
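/* INDX is zero when the symbol is local or has no dynamic symbol
   table entry; the relocations emitted below are then against
   symbol 0 and the dtpoff value is filled in at link time where
   needed.  */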
3446
3447 if (GOT_TLS_GDESC_P (tls_type))
3448 {
3449 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3450 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3451 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3452 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3453 + htab->elf.sgotplt->output_offset
3454 + offplt
3455 + htab->sgotplt_jump_table_size);
3456 sreloc = htab->elf.srelplt;
3457 if (indx == 0)
3458 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3459 else
3460 outrel.r_addend = 0;
3461 elf_append_rela (output_bfd, sreloc, &outrel);
3462 }
3463
3464 sreloc = htab->elf.srelgot;
3465
3466 outrel.r_offset = (htab->elf.sgot->output_section->vma
3467 + htab->elf.sgot->output_offset + off);
3468
3469 if (GOT_TLS_GD_P (tls_type))
3470 dr_type = R_X86_64_DTPMOD64;
3471 else if (GOT_TLS_GDESC_P (tls_type))
3472 goto dr_done;
3473 else
3474 dr_type = R_X86_64_TPOFF64;
3475
3476 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3477 outrel.r_addend = 0;
3478 if ((dr_type == R_X86_64_TPOFF64
3479 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3480 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3481 outrel.r_info = htab->r_info (indx, dr_type);
3482
3483 elf_append_rela (output_bfd, sreloc, &outrel);
3484
3485 if (GOT_TLS_GD_P (tls_type))
3486 {
3487 if (indx == 0)
3488 {
3489 BFD_ASSERT (! unresolved_reloc);
3490 bfd_put_64 (output_bfd,
3491 relocation - _bfd_x86_elf_dtpoff_base (info),
3492 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3493 }
3494 else
3495 {
3496 bfd_put_64 (output_bfd, 0,
3497 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3498 outrel.r_info = htab->r_info (indx,
3499 R_X86_64_DTPOFF64);
3500 outrel.r_offset += GOT_ENTRY_SIZE;
3501 elf_append_rela (output_bfd, sreloc,
3502 &outrel);
3503 }
3504 }
3505
3506 dr_done:
3507 if (h != NULL)
3508 h->got.offset |= 1;
3509 else
3510 local_got_offsets[r_symndx] |= 1;
3511 }
3512
3513 if (off >= (bfd_vma) -2
3514 && ! GOT_TLS_GDESC_P (tls_type))
3515 abort ();
3516 if (r_type_tls == r_type)
3517 {
3518 if (r_type == R_X86_64_GOTPC32_TLSDESC
3519 || r_type == R_X86_64_TLSDESC_CALL)
3520 relocation = htab->elf.sgotplt->output_section->vma
3521 + htab->elf.sgotplt->output_offset
3522 + offplt + htab->sgotplt_jump_table_size;
3523 else
3524 relocation = htab->elf.sgot->output_section->vma
3525 + htab->elf.sgot->output_offset + off;
3526 unresolved_reloc = FALSE;
3527 }
3528 else
3529 {
3530 bfd_vma roff = rel->r_offset;
3531
3532 if (r_type == R_X86_64_TLSGD)
3533 {
3534 /* GD->IE transition. For 64bit, change
3535 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3536 .word 0x6666; rex64; call __tls_get_addr@PLT
3537 or
3538 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3539 .byte 0x66; rex64
3540 call *__tls_get_addr@GOTPCREL(%rip)
3541 which may be converted to
3542 addr32 call __tls_get_addr
3543 into:
3544 movq %fs:0, %rax
3545 addq foo@gottpoff(%rip), %rax
3546 For 32bit, change
3547 leaq foo@tlsgd(%rip), %rdi
3548 .word 0x6666; rex64; call __tls_get_addr@PLT
3549 or
3550 leaq foo@tlsgd(%rip), %rdi
3551 .byte 0x66; rex64;
3552 call *__tls_get_addr@GOTPCREL(%rip)
3553 which may be converted to
3554 addr32 call __tls_get_addr
3555 into:
3556 movl %fs:0, %eax
3557 addq foo@gottpoff(%rip), %rax
3558 For largepic, change:
3559 leaq foo@tlsgd(%rip), %rdi
3560 movabsq $__tls_get_addr@pltoff, %rax
3561 addq %r15, %rax
3562 call *%rax
3563 into:
3564 movq %fs:0, %rax
3565 addq foo@gottpoff(%rax), %rax
3566 nopw 0x0(%rax,%rax,1) */
3567 int largepic = 0;
3568 if (ABI_64_P (output_bfd))
3569 {
3570 if (contents[roff + 5] == 0xb8)
3571 {
3572 memcpy (contents + roff - 3,
3573 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3574 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3575 largepic = 1;
3576 }
3577 else
3578 memcpy (contents + roff - 4,
3579 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3580 16);
3581 }
3582 else
3583 memcpy (contents + roff - 3,
3584 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3585 15);
3586
3587 relocation = (htab->elf.sgot->output_section->vma
3588 + htab->elf.sgot->output_offset + off
3589 - roff
3590 - largepic
3591 - input_section->output_section->vma
3592 - input_section->output_offset
3593 - 12);
3594 bfd_put_32 (output_bfd, relocation,
3595 contents + roff + 8 + largepic);
3596 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3597 rel++;
3598 wrel++;
3599 continue;
3600 }
3601 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3602 {
3603 /* GDesc -> IE transition.
3604 It's originally something like:
3605 leaq x@tlsdesc(%rip), %rax
3606
3607 Change it to:
3608 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3609
3610 /* Now modify the instruction as appropriate. To
3611 turn a leaq into a movq in the form we use it, it
3612 suffices to change the second byte from 0x8d to
3613 0x8b. */
3614 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3615
3616 bfd_put_32 (output_bfd,
3617 htab->elf.sgot->output_section->vma
3618 + htab->elf.sgot->output_offset + off
3619 - rel->r_offset
3620 - input_section->output_section->vma
3621 - input_section->output_offset
3622 - 4,
3623 contents + roff);
3624 continue;
3625 }
3626 else if (r_type == R_X86_64_TLSDESC_CALL)
3627 {
3628 /* GDesc -> IE transition.
3629 It's originally:
3630 call *(%rax)
3631
3632 Change it to:
3633 xchg %ax, %ax. */
3634
3635 bfd_put_8 (output_bfd, 0x66, contents + roff);
3636 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3637 continue;
3638 }
3639 else
3640 BFD_ASSERT (FALSE);
3641 }
3642 break;
3643
3644 case R_X86_64_TLSLD:
3645 if (! elf_x86_64_tls_transition (info, input_bfd,
3646 input_section, contents,
3647 symtab_hdr, sym_hashes,
3648 &r_type, GOT_UNKNOWN, rel,
3649 relend, h, r_symndx, TRUE))
3650 return FALSE;
3651
3652 if (r_type != R_X86_64_TLSLD)
3653 {
3654 /* LD->LE transition:
3655 leaq foo@tlsld(%rip), %rdi
3656 call __tls_get_addr@PLT
3657 For 64bit, we change it into:
3658 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3659 For 32bit, we change it into:
3660 nopl 0x0(%rax); movl %fs:0, %eax
3661 Or
3662 leaq foo@tlsld(%rip), %rdi;
3663 call *__tls_get_addr@GOTPCREL(%rip)
3664 which may be converted to
3665 addr32 call __tls_get_addr
3666 For 64bit, we change it into:
3667 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3668 For 32bit, we change it into:
3669 nopw 0x0(%rax); movl %fs:0, %eax
3670 For largepic, change:
3671 leaq foo@tlsgd(%rip), %rdi
3672 movabsq $__tls_get_addr@pltoff, %rax
3673 addq %rbx, %rax
3674 call *%rax
3675 into
3676 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3677 movq %fs:0, %rax */
3678
3679 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3680 if (ABI_64_P (output_bfd))
3681 {
3682 if (contents[rel->r_offset + 5] == 0xb8)
3683 memcpy (contents + rel->r_offset - 3,
3684 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3685 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3686 else if (contents[rel->r_offset + 4] == 0xff
3687 || contents[rel->r_offset + 4] == 0x67)
3688 memcpy (contents + rel->r_offset - 3,
3689 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3690 13);
3691 else
3692 memcpy (contents + rel->r_offset - 3,
3693 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3694 }
3695 else
3696 {
3697 if (contents[rel->r_offset + 4] == 0xff)
3698 memcpy (contents + rel->r_offset - 3,
3699 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3700 13);
3701 else
3702 memcpy (contents + rel->r_offset - 3,
3703 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3704 }
3705 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3706 and R_X86_64_PLTOFF64. */
3707 rel++;
3708 wrel++;
3709 continue;
3710 }
3711
3712 if (htab->elf.sgot == NULL)
3713 abort ();
3714
3715 off = htab->tls_ld_or_ldm_got.offset;
3716 if (off & 1)
3717 off &= ~1;
3718 else
3719 {
3720 Elf_Internal_Rela outrel;
3721
3722 if (htab->elf.srelgot == NULL)
3723 abort ();
3724
3725 outrel.r_offset = (htab->elf.sgot->output_section->vma
3726 + htab->elf.sgot->output_offset + off);
3727
3728 bfd_put_64 (output_bfd, 0,
3729 htab->elf.sgot->contents + off);
3730 bfd_put_64 (output_bfd, 0,
3731 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3732 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3733 outrel.r_addend = 0;
3734 elf_append_rela (output_bfd, htab->elf.srelgot,
3735 &outrel);
3736 htab->tls_ld_or_ldm_got.offset |= 1;
3737 }
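/* Sketch of the tag-bit idiom used on tls_ld_or_ldm_got.offset above
   (illustrative only; "slot" is a hypothetical local, not a BFD field).
   GOT offsets are multiples of GOT_ENTRY_SIZE, so bit 0 is always free
   and records whether the shared LD/LDM entry was already emitted.

     bfd_vma slot = 0x18;               // even: not yet initialized
     if ((slot & 1) == 0)
       {
         // ... emit the R_X86_64_DTPMOD64 reloc exactly once ...
         slot |= 1;                     // remember that it is done
       }
     bfd_vma real_off = slot & ~(bfd_vma) 1;   // 0x18 either way  */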
3738 relocation = htab->elf.sgot->output_section->vma
3739 + htab->elf.sgot->output_offset + off;
3740 unresolved_reloc = FALSE;
3741 break;
3742
3743 case R_X86_64_DTPOFF32:
3744 if (!bfd_link_executable (info)
3745 || (input_section->flags & SEC_CODE) == 0)
3746 relocation -= _bfd_x86_elf_dtpoff_base (info);
3747 else
3748 relocation = elf_x86_64_tpoff (info, relocation);
3749 break;
3750
3751 case R_X86_64_TPOFF32:
3752 case R_X86_64_TPOFF64:
3753 BFD_ASSERT (bfd_link_executable (info));
3754 relocation = elf_x86_64_tpoff (info, relocation);
3755 break;
3756
3757 case R_X86_64_DTPOFF64:
3758 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3759 relocation -= _bfd_x86_elf_dtpoff_base (info);
3760 break;
3761
3762 default:
3763 break;
3764 }
3765
3766 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3767 because such sections are not SEC_ALLOC and thus ld.so will
3768 not process them. */
3769 if (unresolved_reloc
3770 && !((input_section->flags & SEC_DEBUGGING) != 0
3771 && h->def_dynamic)
3772 && _bfd_elf_section_offset (output_bfd, info, input_section,
3773 rel->r_offset) != (bfd_vma) -1)
3774 {
3775 switch (r_type)
3776 {
3777 case R_X86_64_32S:
3778 sec = h->root.u.def.section;
3779 if ((info->nocopyreloc
3780 || (eh->def_protected
3781 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3782 && !(h->root.u.def.section->flags & SEC_CODE))
3783 return elf_x86_64_need_pic (info, input_bfd, input_section,
3784 h, NULL, NULL, howto);
3785 /* Fall through. */
3786
3787 default:
3788 _bfd_error_handler
3789 /* xgettext:c-format */
3790 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3791 input_bfd,
3792 input_section,
3793 rel->r_offset,
3794 howto->name,
3795 h->root.root.string);
3796 return FALSE;
3797 }
3798 }
3799
3800 do_relocation:
3801 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3802 contents, rel->r_offset,
3803 relocation, rel->r_addend);
3804
3805 check_relocation_error:
3806 if (r != bfd_reloc_ok)
3807 {
3808 const char *name;
3809
3810 if (h != NULL)
3811 name = h->root.root.string;
3812 else
3813 {
3814 name = bfd_elf_string_from_elf_section (input_bfd,
3815 symtab_hdr->sh_link,
3816 sym->st_name);
3817 if (name == NULL)
3818 return FALSE;
3819 if (*name == '\0')
3820 name = bfd_section_name (input_bfd, sec);
3821 }
3822
3823 if (r == bfd_reloc_overflow)
3824 {
3825 if (converted_reloc)
3826 {
3827 info->callbacks->einfo
3828 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3829 return FALSE;
3830 }
3831 (*info->callbacks->reloc_overflow)
3832 (info, (h ? &h->root : NULL), name, howto->name,
3833 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3834 }
3835 else
3836 {
3837 _bfd_error_handler
3838 /* xgettext:c-format */
3839 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3840 input_bfd, input_section,
3841 rel->r_offset, name, (int) r);
3842 return FALSE;
3843 }
3844 }
3845
3846 if (wrel != rel)
3847 *wrel = *rel;
3848 }
3849
3850 if (wrel != rel)
3851 {
3852 Elf_Internal_Shdr *rel_hdr;
3853 size_t deleted = rel - wrel;
3854
3855 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3856 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3857 if (rel_hdr->sh_size == 0)
3858 {
3859 /* It is too late to remove an empty reloc section. Leave
3860 one NONE reloc.
3861 ??? What is wrong with an empty section??? */
3862 rel_hdr->sh_size = rel_hdr->sh_entsize;
3863 deleted -= 1;
3864 }
3865 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3866 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3867 input_section->reloc_count -= deleted;
3868 }
3869
3870 return TRUE;
3871 }
3872
3873 /* Finish up dynamic symbol handling. We set the contents of various
3874 dynamic sections here. */
3875
3876 static bfd_boolean
3877 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3878 struct bfd_link_info *info,
3879 struct elf_link_hash_entry *h,
3880 Elf_Internal_Sym *sym)
3881 {
3882 struct elf_x86_link_hash_table *htab;
3883 bfd_boolean use_plt_second;
3884 struct elf_x86_link_hash_entry *eh;
3885 bfd_boolean local_undefweak;
3886
3887 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3888 if (htab == NULL)
3889 return FALSE;
3890
3891 /* Use the second PLT section only if there is a .plt section. */
3892 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3893
3894 eh = (struct elf_x86_link_hash_entry *) h;
3895 if (eh->no_finish_dynamic_symbol)
3896 abort ();
3897
3898 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3899 resolved undefined weak symbols in an executable so that their
3900 references have the value 0 at run-time. */
3901 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3902
3903 if (h->plt.offset != (bfd_vma) -1)
3904 {
3905 bfd_vma plt_index;
3906 bfd_vma got_offset, plt_offset;
3907 Elf_Internal_Rela rela;
3908 bfd_byte *loc;
3909 asection *plt, *gotplt, *relplt, *resolved_plt;
3910 const struct elf_backend_data *bed;
3911 bfd_vma plt_got_pcrel_offset;
3912
3913 /* When building a static executable, use .iplt, .igot.plt and
3914 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3915 if (htab->elf.splt != NULL)
3916 {
3917 plt = htab->elf.splt;
3918 gotplt = htab->elf.sgotplt;
3919 relplt = htab->elf.srelplt;
3920 }
3921 else
3922 {
3923 plt = htab->elf.iplt;
3924 gotplt = htab->elf.igotplt;
3925 relplt = htab->elf.irelplt;
3926 }
3927
3928 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3929
3930 /* Get the index in the procedure linkage table which
3931 corresponds to this symbol. This is the index of this symbol
3932 in all the symbols for which we are making plt entries. The
3933 first entry in the procedure linkage table is reserved.
3934
3935 Get the offset into the .got table of the entry that
3936 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3937 bytes. The first three are reserved for the dynamic linker.
3938
3939 For static executables, we don't reserve anything. */
3940
3941 if (plt == htab->elf.splt)
3942 {
3943 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3944 - htab->plt.has_plt0);
3945 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3946 }
3947 else
3948 {
3949 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3950 got_offset = got_offset * GOT_ENTRY_SIZE;
3951 }
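/* Worked example of the .got.plt offset computation above, with a
   hypothetical 16-byte PLT entry size, PLT0 present (has_plt0 == 1)
   and 8-byte GOT entries.  The first three .got.plt slots are reserved
   for the dynamic linker, so PLT entry N uses slot N + 3.

     bfd_vma plt_offset = 32;                   // second entry after PLT0
     bfd_vma index      = plt_offset / 16 - 1;  // == 1
     bfd_vma got_offset = (index + 3) * 8;      // slot 4, byte offset 32  */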
3952
3953 /* Fill in the entry in the procedure linkage table. */
3954 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3955 htab->plt.plt_entry_size);
3956 if (use_plt_second)
3957 {
3958 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3959 htab->non_lazy_plt->plt_entry,
3960 htab->non_lazy_plt->plt_entry_size);
3961
3962 resolved_plt = htab->plt_second;
3963 plt_offset = eh->plt_second.offset;
3964 }
3965 else
3966 {
3967 resolved_plt = plt;
3968 plt_offset = h->plt.offset;
3969 }
3970
3971 /* Fill in the relocated positions in the PLT entry. */
3972
3973 /* Put in the offset from the PC-relative instruction to the GOT
3974 entry, subtracting the size of that instruction. */
3975 plt_got_pcrel_offset = (gotplt->output_section->vma
3976 + gotplt->output_offset
3977 + got_offset
3978 - resolved_plt->output_section->vma
3979 - resolved_plt->output_offset
3980 - plt_offset
3981 - htab->plt.plt_got_insn_size);
3982
3983 /* Check PC-relative offset overflow in PLT entry. */
3984 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3985 /* xgettext:c-format */
3986 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
3987 output_bfd, h->root.root.string);
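/* The test above is the usual "fits in a signed 32-bit displacement"
   trick: adding 0x80000000 maps the valid range
   [-0x80000000, 0x7fffffff] onto [0, 0xffffffff], so any larger result
   means overflow.  A self-contained sketch (hypothetical helper, not
   part of BFD):

     static int
     fits_in_signed_32 (bfd_vma x)
     {
       return (x + 0x80000000) <= 0xffffffff;
     }
     // fits_in_signed_32 (0x7fffffff) == 1
     // fits_in_signed_32 (0x80000000) == 0
     // fits_in_signed_32 ((bfd_vma) -1) == 1  (i.e. -1 fits)  */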
3988
3989 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3990 (resolved_plt->contents + plt_offset
3991 + htab->plt.plt_got_offset));
3992
3993 /* Fill in the entry in the global offset table; initially this
3994 points to the second part of the PLT entry. Leave the entry
3995 as zero for an undefined weak symbol in PIE; no PLT relocation
3996 is emitted against an undefined weak symbol in PIE. */
3997 if (!local_undefweak)
3998 {
3999 if (htab->plt.has_plt0)
4000 bfd_put_64 (output_bfd, (plt->output_section->vma
4001 + plt->output_offset
4002 + h->plt.offset
4003 + htab->lazy_plt->plt_lazy_offset),
4004 gotplt->contents + got_offset);
4005
4006 /* Fill in the entry in the .rela.plt section. */
4007 rela.r_offset = (gotplt->output_section->vma
4008 + gotplt->output_offset
4009 + got_offset);
4010 if (PLT_LOCAL_IFUNC_P (info, h))
4011 {
4012 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4013 h->root.root.string,
4014 h->root.u.def.section->owner);
4015
4016 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4017 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4018 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4019 rela.r_addend = (h->root.u.def.value
4020 + h->root.u.def.section->output_section->vma
4021 + h->root.u.def.section->output_offset);
4022 /* R_X86_64_IRELATIVE comes last. */
4023 plt_index = htab->next_irelative_index--;
4024 }
4025 else
4026 {
4027 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4028 rela.r_addend = 0;
4029 plt_index = htab->next_jump_slot_index++;
4030 }
4031
4032 /* Don't fill in the second and third slots of the PLT entry
4033 for static executables or when there is no PLT0. */
4034 if (plt == htab->elf.splt && htab->plt.has_plt0)
4035 {
4036 bfd_vma plt0_offset
4037 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4038
4039 /* Put relocation index. */
4040 bfd_put_32 (output_bfd, plt_index,
4041 (plt->contents + h->plt.offset
4042 + htab->lazy_plt->plt_reloc_offset));
4043
4044 /* Put in the offset for jmp .PLT0 and check for overflow. We
4045 don't check the relocation index for overflow since the branch
4046 displacement will overflow first. */
4047 if (plt0_offset > 0x80000000)
4048 /* xgettext:c-format */
4049 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4050 output_bfd, h->root.root.string);
4051 bfd_put_32 (output_bfd, - plt0_offset,
4052 (plt->contents + h->plt.offset
4053 + htab->lazy_plt->plt_plt_offset));
4054 }
4055
4056 bed = get_elf_backend_data (output_bfd);
4057 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4058 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4059 }
4060 }
4061 else if (eh->plt_got.offset != (bfd_vma) -1)
4062 {
4063 bfd_vma got_offset, plt_offset;
4064 asection *plt, *got;
4065 bfd_boolean got_after_plt;
4066 int32_t got_pcrel_offset;
4067
4068 /* Set the entry in the GOT procedure linkage table. */
4069 plt = htab->plt_got;
4070 got = htab->elf.sgot;
4071 got_offset = h->got.offset;
4072
4073 if (got_offset == (bfd_vma) -1
4074 || (h->type == STT_GNU_IFUNC && h->def_regular)
4075 || plt == NULL
4076 || got == NULL)
4077 abort ();
4078
4079 /* Use the non-lazy PLT entry template for the GOT PLT since they
4080 are identical. */
4081 /* Fill in the entry in the GOT procedure linkage table. */
4082 plt_offset = eh->plt_got.offset;
4083 memcpy (plt->contents + plt_offset,
4084 htab->non_lazy_plt->plt_entry,
4085 htab->non_lazy_plt->plt_entry_size);
4086
4087 /* Put in the offset from the PC-relative instruction to the GOT
4088 entry, subtracting the size of that instruction. */
4089 got_pcrel_offset = (got->output_section->vma
4090 + got->output_offset
4091 + got_offset
4092 - plt->output_section->vma
4093 - plt->output_offset
4094 - plt_offset
4095 - htab->non_lazy_plt->plt_got_insn_size);
4096
4097 /* Check PC-relative offset overflow in GOT PLT entry. */
4098 got_after_plt = got->output_section->vma > plt->output_section->vma;
4099 if ((got_after_plt && got_pcrel_offset < 0)
4100 || (!got_after_plt && got_pcrel_offset > 0))
4101 /* xgettext:c-format */
4102 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4103 output_bfd, h->root.root.string);
4104
4105 bfd_put_32 (output_bfd, got_pcrel_offset,
4106 (plt->contents + plt_offset
4107 + htab->non_lazy_plt->plt_got_offset));
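/* Sketch of the sign-based overflow test above (hypothetical values).
   got_pcrel_offset is a 32-bit signed value truncated from a 64-bit
   difference; when the GOT lies above the PLT the true displacement is
   positive, so a negative truncated value means it did not fit.

     int32_t off = (int32_t) 0x90000000u;   // truncated displacement
     int got_after_plt = 1;                 // GOT placed above the PLT
     int overflow = got_after_plt ? off < 0 : off > 0;   // == 1 here  */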
4108 }
4109
4110 if (!local_undefweak
4111 && !h->def_regular
4112 && (h->plt.offset != (bfd_vma) -1
4113 || eh->plt_got.offset != (bfd_vma) -1))
4114 {
4115 /* Mark the symbol as undefined, rather than as defined in
4116 the .plt section. Leave the value if there were any
4117 relocations where pointer equality matters (this is a clue
4118 for the dynamic linker, to make function pointer
4119 comparisons work between an application and shared
4120 library), otherwise set it to zero. If a function is only
4121 called from a binary, there is no need to slow down
4122 shared libraries because of that. */
4123 sym->st_shndx = SHN_UNDEF;
4124 if (!h->pointer_equality_needed)
4125 sym->st_value = 0;
4126 }
4127
4128 /* Don't generate a dynamic GOT relocation against an undefined
4129 weak symbol in an executable. */
4130 if (h->got.offset != (bfd_vma) -1
4131 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4132 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4133 && !local_undefweak)
4134 {
4135 Elf_Internal_Rela rela;
4136 asection *relgot = htab->elf.srelgot;
4137
4138 /* This symbol has an entry in the global offset table. Set it
4139 up. */
4140 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4141 abort ();
4142
4143 rela.r_offset = (htab->elf.sgot->output_section->vma
4144 + htab->elf.sgot->output_offset
4145 + (h->got.offset &~ (bfd_vma) 1));
4146
4147 /* If this is a static link, or it is a -Bsymbolic link and the
4148 symbol is defined locally or was forced to be local because
4149 of a version file, we just want to emit a RELATIVE reloc.
4150 The entry in the global offset table will already have been
4151 initialized in the relocate_section function. */
4152 if (h->def_regular
4153 && h->type == STT_GNU_IFUNC)
4154 {
4155 if (h->plt.offset == (bfd_vma) -1)
4156 {
4157 /* STT_GNU_IFUNC is referenced without PLT. */
4158 if (htab->elf.splt == NULL)
4159 {
4160 /* Use the .rel[a].iplt section to store .got relocations
4161 in a static executable. */
4162 relgot = htab->elf.irelplt;
4163 }
4164 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4165 {
4166 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4167 h->root.root.string,
4168 h->root.u.def.section->owner);
4169
4170 rela.r_info = htab->r_info (0,
4171 R_X86_64_IRELATIVE);
4172 rela.r_addend = (h->root.u.def.value
4173 + h->root.u.def.section->output_section->vma
4174 + h->root.u.def.section->output_offset);
4175 }
4176 else
4177 goto do_glob_dat;
4178 }
4179 else if (bfd_link_pic (info))
4180 {
4181 /* Generate R_X86_64_GLOB_DAT. */
4182 goto do_glob_dat;
4183 }
4184 else
4185 {
4186 asection *plt;
4187 bfd_vma plt_offset;
4188
4189 if (!h->pointer_equality_needed)
4190 abort ();
4191
4192 /* For a non-shared object, we can't use .got.plt, which
4193 contains the real function address, if we need pointer
4194 equality. We load the GOT entry with the PLT entry instead. */
4195 if (htab->plt_second != NULL)
4196 {
4197 plt = htab->plt_second;
4198 plt_offset = eh->plt_second.offset;
4199 }
4200 else
4201 {
4202 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4203 plt_offset = h->plt.offset;
4204 }
4205 bfd_put_64 (output_bfd, (plt->output_section->vma
4206 + plt->output_offset
4207 + plt_offset),
4208 htab->elf.sgot->contents + h->got.offset);
4209 return TRUE;
4210 }
4211 }
4212 else if (bfd_link_pic (info)
4213 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4214 {
4215 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4216 return FALSE;
4217 BFD_ASSERT((h->got.offset & 1) != 0);
4218 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4219 rela.r_addend = (h->root.u.def.value
4220 + h->root.u.def.section->output_section->vma
4221 + h->root.u.def.section->output_offset);
4222 }
4223 else
4224 {
4225 BFD_ASSERT((h->got.offset & 1) == 0);
4226 do_glob_dat:
4227 bfd_put_64 (output_bfd, (bfd_vma) 0,
4228 htab->elf.sgot->contents + h->got.offset);
4229 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4230 rela.r_addend = 0;
4231 }
4232
4233 elf_append_rela (output_bfd, relgot, &rela);
4234 }
4235
4236 if (h->needs_copy)
4237 {
4238 Elf_Internal_Rela rela;
4239 asection *s;
4240
4241 /* This symbol needs a copy reloc. Set it up. */
4242 VERIFY_COPY_RELOC (h, htab)
4243
4244 rela.r_offset = (h->root.u.def.value
4245 + h->root.u.def.section->output_section->vma
4246 + h->root.u.def.section->output_offset);
4247 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4248 rela.r_addend = 0;
4249 if (h->root.u.def.section == htab->elf.sdynrelro)
4250 s = htab->elf.sreldynrelro;
4251 else
4252 s = htab->elf.srelbss;
4253 elf_append_rela (output_bfd, s, &rela);
4254 }
4255
4256 return TRUE;
4257 }
4258
4259 /* Finish up local dynamic symbol handling. We set the contents of
4260 various dynamic sections here. */
4261
4262 static bfd_boolean
4263 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4264 {
4265 struct elf_link_hash_entry *h
4266 = (struct elf_link_hash_entry *) *slot;
4267 struct bfd_link_info *info
4268 = (struct bfd_link_info *) inf;
4269
4270 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4271 info, h, NULL);
4272 }
4273
4274 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT
4275 entry here since an undefined weak symbol may not be dynamic and
4276 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4277
4278 static bfd_boolean
4279 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4280 void *inf)
4281 {
4282 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4283 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4284
4285 if (h->root.type != bfd_link_hash_undefweak
4286 || h->dynindx != -1)
4287 return TRUE;
4288
4289 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4290 info, h, NULL);
4291 }
4292
4293 /* Used to decide how to sort relocs in an optimal manner for the
4294 dynamic linker, before writing them out. */
4295
4296 static enum elf_reloc_type_class
4297 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4298 const asection *rel_sec ATTRIBUTE_UNUSED,
4299 const Elf_Internal_Rela *rela)
4300 {
4301 bfd *abfd = info->output_bfd;
4302 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4303 struct elf_x86_link_hash_table *htab
4304 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4305
4306 if (htab->elf.dynsym != NULL
4307 && htab->elf.dynsym->contents != NULL)
4308 {
4309 /* Check relocation against STT_GNU_IFUNC symbol if there are
4310 dynamic symbols. */
4311 unsigned long r_symndx = htab->r_sym (rela->r_info);
4312 if (r_symndx != STN_UNDEF)
4313 {
4314 Elf_Internal_Sym sym;
4315 if (!bed->s->swap_symbol_in (abfd,
4316 (htab->elf.dynsym->contents
4317 + r_symndx * bed->s->sizeof_sym),
4318 0, &sym))
4319 abort ();
4320
4321 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4322 return reloc_class_ifunc;
4323 }
4324 }
4325
4326 switch ((int) ELF32_R_TYPE (rela->r_info))
4327 {
4328 case R_X86_64_IRELATIVE:
4329 return reloc_class_ifunc;
4330 case R_X86_64_RELATIVE:
4331 case R_X86_64_RELATIVE64:
4332 return reloc_class_relative;
4333 case R_X86_64_JUMP_SLOT:
4334 return reloc_class_plt;
4335 case R_X86_64_COPY:
4336 return reloc_class_copy;
4337 default:
4338 return reloc_class_normal;
4339 }
4340 }
4341
4342 /* Finish up the dynamic sections. */
4343
4344 static bfd_boolean
4345 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4346 struct bfd_link_info *info)
4347 {
4348 struct elf_x86_link_hash_table *htab;
4349
4350 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4351 if (htab == NULL)
4352 return FALSE;
4353
4354 if (! htab->elf.dynamic_sections_created)
4355 return TRUE;
4356
4357 if (htab->elf.splt && htab->elf.splt->size > 0)
4358 {
4359 elf_section_data (htab->elf.splt->output_section)
4360 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4361
4362 if (htab->plt.has_plt0)
4363 {
4364 /* Fill in the special first entry in the procedure linkage
4365 table. */
4366 memcpy (htab->elf.splt->contents,
4367 htab->lazy_plt->plt0_entry,
4368 htab->lazy_plt->plt0_entry_size);
4369 /* Add the offset for pushq GOT+8(%rip); since the instruction
4370 uses 6 bytes, subtract this value. */
4371 bfd_put_32 (output_bfd,
4372 (htab->elf.sgotplt->output_section->vma
4373 + htab->elf.sgotplt->output_offset
4374 + 8
4375 - htab->elf.splt->output_section->vma
4376 - htab->elf.splt->output_offset
4377 - 6),
4378 (htab->elf.splt->contents
4379 + htab->lazy_plt->plt0_got1_offset));
4380 /* Add offset for the PC-relative instruction accessing
4381 GOT+16, subtracting the offset to the end of that
4382 instruction. */
4383 bfd_put_32 (output_bfd,
4384 (htab->elf.sgotplt->output_section->vma
4385 + htab->elf.sgotplt->output_offset
4386 + 16
4387 - htab->elf.splt->output_section->vma
4388 - htab->elf.splt->output_offset
4389 - htab->lazy_plt->plt0_got2_insn_end),
4390 (htab->elf.splt->contents
4391 + htab->lazy_plt->plt0_got2_offset));
4392 }
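/* Worked example of the PLT0 fixups above, with hypothetical section
   addresses.  The first patched field is the displacement used by
   pushq GOT+8(%rip): GOT+8 minus the address just past that 6-byte
   instruction (.plt + 6).

     bfd_vma gotplt = 0x600ff8;              // .got.plt address
     bfd_vma plt    = 0x400410;              // .plt address
     bfd_vma disp1  = gotplt + 8 - plt - 6;  // == 0x200bea
     // The GOT+16 field is formed the same way, ending at
     // plt0_got2_insn_end instead of 6.  */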
4393
4394 if (htab->tlsdesc_plt)
4395 {
4396 bfd_put_64 (output_bfd, (bfd_vma) 0,
4397 htab->elf.sgot->contents + htab->tlsdesc_got);
4398
4399 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4400 htab->lazy_plt->plt0_entry,
4401 htab->lazy_plt->plt0_entry_size);
4402
4403 /* Add the offset for pushq GOT+8(%rip); since the
4404 instruction uses 6 bytes, subtract this value. */
4405 bfd_put_32 (output_bfd,
4406 (htab->elf.sgotplt->output_section->vma
4407 + htab->elf.sgotplt->output_offset
4408 + 8
4409 - htab->elf.splt->output_section->vma
4410 - htab->elf.splt->output_offset
4411 - htab->tlsdesc_plt
4412 - 6),
4413 (htab->elf.splt->contents
4414 + htab->tlsdesc_plt
4415 + htab->lazy_plt->plt0_got1_offset));
4416 /* Add offset for the PC-relative instruction accessing
4417 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4418 subtracting the offset to the end of that
4419 instruction. */
4420 bfd_put_32 (output_bfd,
4421 (htab->elf.sgot->output_section->vma
4422 + htab->elf.sgot->output_offset
4423 + htab->tlsdesc_got
4424 - htab->elf.splt->output_section->vma
4425 - htab->elf.splt->output_offset
4426 - htab->tlsdesc_plt
4427 - htab->lazy_plt->plt0_got2_insn_end),
4428 (htab->elf.splt->contents
4429 + htab->tlsdesc_plt
4430 + htab->lazy_plt->plt0_got2_offset));
4431 }
4432 }
4433
4434 /* Fill PLT entries for undefined weak symbols in PIE. */
4435 if (bfd_link_pie (info))
4436 bfd_hash_traverse (&info->hash->table,
4437 elf_x86_64_pie_finish_undefweak_symbol,
4438 info);
4439
4440 return TRUE;
4441 }
4442
4443 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4444 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4445 It has to be done before elf_link_sort_relocs is called so that
4446 dynamic relocations are properly sorted. */
4447
4448 static bfd_boolean
4449 elf_x86_64_output_arch_local_syms
4450 (bfd *output_bfd ATTRIBUTE_UNUSED,
4451 struct bfd_link_info *info,
4452 void *flaginfo ATTRIBUTE_UNUSED,
4453 int (*func) (void *, const char *,
4454 Elf_Internal_Sym *,
4455 asection *,
4456 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4457 {
4458 struct elf_x86_link_hash_table *htab
4459 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4460 if (htab == NULL)
4461 return FALSE;
4462
4463 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4464 htab_traverse (htab->loc_hash_table,
4465 elf_x86_64_finish_local_dynamic_symbol,
4466 info);
4467
4468 return TRUE;
4469 }
4470
4471 /* Forward declaration. */
4472 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4473
4474 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4475 dynamic relocations. */
4476
4477 static long
4478 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4479 long symcount ATTRIBUTE_UNUSED,
4480 asymbol **syms ATTRIBUTE_UNUSED,
4481 long dynsymcount,
4482 asymbol **dynsyms,
4483 asymbol **ret)
4484 {
4485 long count, i, n;
4486 int j;
4487 bfd_byte *plt_contents;
4488 long relsize;
4489 const struct elf_x86_lazy_plt_layout *lazy_plt;
4490 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4491 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4492 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4493 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4494 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4495 asection *plt;
4496 enum elf_x86_plt_type plt_type;
4497 struct elf_x86_plt plts[] =
4498 {
4499 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4500 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4501 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4502 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4503 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4504 };
4505
4506 *ret = NULL;
4507
4508 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4509 return 0;
4510
4511 if (dynsymcount <= 0)
4512 return 0;
4513
4514 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4515 if (relsize <= 0)
4516 return -1;
4517
4518 if (get_elf_x86_backend_data (abfd)->target_os == is_normal)
4519 {
4520 lazy_plt = &elf_x86_64_lazy_plt;
4521 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4522 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4523 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4524 if (ABI_64_P (abfd))
4525 {
4526 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4527 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4528 }
4529 else
4530 {
4531 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4532 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4533 }
4534 }
4535 else
4536 {
4537 lazy_plt = &elf_x86_64_nacl_plt;
4538 non_lazy_plt = NULL;
4539 lazy_bnd_plt = NULL;
4540 non_lazy_bnd_plt = NULL;
4541 lazy_ibt_plt = NULL;
4542 non_lazy_ibt_plt = NULL;
4543 }
4544
4545 count = 0;
4546 for (j = 0; plts[j].name != NULL; j++)
4547 {
4548 plt = bfd_get_section_by_name (abfd, plts[j].name);
4549 if (plt == NULL || plt->size == 0)
4550 continue;
4551
4552 /* Get the PLT section contents. */
4553 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4554 if (plt_contents == NULL)
4555 break;
4556 if (!bfd_get_section_contents (abfd, (asection *) plt,
4557 plt_contents, 0, plt->size))
4558 {
4559 free (plt_contents);
4560 break;
4561 }
4562
4563 /* Check what kind of PLT it is. */
4564 plt_type = plt_unknown;
4565 if (plts[j].type == plt_unknown
4566 && (plt->size >= (lazy_plt->plt_entry_size
4567 + lazy_plt->plt_entry_size)))
4568 {
4569 /* Match lazy PLT first. Need to check the first two
4570 instructions. */
4571 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4572 lazy_plt->plt0_got1_offset) == 0)
4573 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4574 2) == 0))
4575 plt_type = plt_lazy;
4576 else if (lazy_bnd_plt != NULL
4577 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4578 lazy_bnd_plt->plt0_got1_offset) == 0)
4579 && (memcmp (plt_contents + 6,
4580 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4581 {
4582 plt_type = plt_lazy | plt_second;
4583 /* The first entry in the lazy IBT PLT is the same as in
4584 the lazy BND PLT. */
4585 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4586 lazy_ibt_plt->plt_entry,
4587 lazy_ibt_plt->plt_got_offset) == 0))
4588 lazy_plt = lazy_ibt_plt;
4589 else
4590 lazy_plt = lazy_bnd_plt;
4591 }
4592 }
4593
4594 if (non_lazy_plt != NULL
4595 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4596 && plt->size >= non_lazy_plt->plt_entry_size)
4597 {
4598 /* Match non-lazy PLT. */
4599 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4600 non_lazy_plt->plt_got_offset) == 0)
4601 plt_type = plt_non_lazy;
4602 }
4603
4604 if (plt_type == plt_unknown || plt_type == plt_second)
4605 {
4606 if (non_lazy_bnd_plt != NULL
4607 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4608 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4609 non_lazy_bnd_plt->plt_got_offset) == 0))
4610 {
4611 /* Match BND PLT. */
4612 plt_type = plt_second;
4613 non_lazy_plt = non_lazy_bnd_plt;
4614 }
4615 else if (non_lazy_ibt_plt != NULL
4616 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4617 && (memcmp (plt_contents,
4618 non_lazy_ibt_plt->plt_entry,
4619 non_lazy_ibt_plt->plt_got_offset) == 0))
4620 {
4621 /* Match IBT PLT. */
4622 plt_type = plt_second;
4623 non_lazy_plt = non_lazy_ibt_plt;
4624 }
4625 }
4626
4627 if (plt_type == plt_unknown)
4628 {
4629 free (plt_contents);
4630 continue;
4631 }
4632
4633 plts[j].sec = plt;
4634 plts[j].type = plt_type;
4635
4636 if ((plt_type & plt_lazy))
4637 {
4638 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4639 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4640 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4641 /* Skip PLT0 in lazy PLT. */
4642 i = 1;
4643 }
4644 else
4645 {
4646 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4647 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4648 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4649 i = 0;
4650 }
4651
4652 /* Skip lazy PLT when the second PLT is used. */
4653 if (plt_type == (plt_lazy | plt_second))
4654 plts[j].count = 0;
4655 else
4656 {
4657 n = plt->size / plts[j].plt_entry_size;
4658 plts[j].count = n;
4659 count += n - i;
4660 }
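/* Sketch of the synthetic-symbol counting above, with hypothetical
   sizes.  A lazy .plt of 0x50 bytes with 16-byte entries has 5 slots;
   the first slot is PLT0 and produces no symbol, so i == 1 and the
   section contributes 4 symbols.  A non-lazy .plt.got has no PLT0, so
   i == 0 and every slot counts.

     long n = 0x50 / 16;    // 5 PLT slots
     long i = 1;            // lazy PLT: skip PLT0
     long added = n - i;    // 4 synthetic symbols  */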
4661
4662 plts[j].contents = plt_contents;
4663 }
4664
4665 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4666 (bfd_vma) 0, plts, dynsyms,
4667 ret);
4668 }
4669
4670 /* Handle an x86-64 specific section when reading an object file. This
4671 is called when elfcode.h finds a section with an unknown type. */
4672
4673 static bfd_boolean
4674 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4675 const char *name, int shindex)
4676 {
4677 if (hdr->sh_type != SHT_X86_64_UNWIND)
4678 return FALSE;
4679
4680 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4681 return FALSE;
4682
4683 return TRUE;
4684 }
4685
4686 /* Hook called by the linker routine which adds symbols from an object
4687 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4688 of .bss. */
4689
4690 static bfd_boolean
4691 elf_x86_64_add_symbol_hook (bfd *abfd,
4692 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4693 Elf_Internal_Sym *sym,
4694 const char **namep ATTRIBUTE_UNUSED,
4695 flagword *flagsp ATTRIBUTE_UNUSED,
4696 asection **secp,
4697 bfd_vma *valp)
4698 {
4699 asection *lcomm;
4700
4701 switch (sym->st_shndx)
4702 {
4703 case SHN_X86_64_LCOMMON:
4704 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4705 if (lcomm == NULL)
4706 {
4707 lcomm = bfd_make_section_with_flags (abfd,
4708 "LARGE_COMMON",
4709 (SEC_ALLOC
4710 | SEC_IS_COMMON
4711 | SEC_LINKER_CREATED));
4712 if (lcomm == NULL)
4713 return FALSE;
4714 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4715 }
4716 *secp = lcomm;
4717 *valp = sym->st_size;
4718 return TRUE;
4719 }
4720
4721 return TRUE;
4722 }
4723
4724
4725 /* Given a BFD section, try to locate the corresponding ELF section
4726 index. */
4727
4728 static bfd_boolean
4729 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4730 asection *sec, int *index_return)
4731 {
4732 if (sec == &_bfd_elf_large_com_section)
4733 {
4734 *index_return = SHN_X86_64_LCOMMON;
4735 return TRUE;
4736 }
4737 return FALSE;
4738 }
4739
4740 /* Process a symbol. */
4741
4742 static void
4743 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4744 asymbol *asym)
4745 {
4746 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4747
4748 switch (elfsym->internal_elf_sym.st_shndx)
4749 {
4750 case SHN_X86_64_LCOMMON:
4751 asym->section = &_bfd_elf_large_com_section;
4752 asym->value = elfsym->internal_elf_sym.st_size;
4753 /* Common symbol doesn't set BSF_GLOBAL. */
4754 asym->flags &= ~BSF_GLOBAL;
4755 break;
4756 }
4757 }
4758
4759 static bfd_boolean
4760 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4761 {
4762 return (sym->st_shndx == SHN_COMMON
4763 || sym->st_shndx == SHN_X86_64_LCOMMON);
4764 }
4765
4766 static unsigned int
4767 elf_x86_64_common_section_index (asection *sec)
4768 {
4769 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4770 return SHN_COMMON;
4771 else
4772 return SHN_X86_64_LCOMMON;
4773 }
4774
4775 static asection *
4776 elf_x86_64_common_section (asection *sec)
4777 {
4778 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4779 return bfd_com_section_ptr;
4780 else
4781 return &_bfd_elf_large_com_section;
4782 }
4783
4784 static bfd_boolean
4785 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4786 const Elf_Internal_Sym *sym,
4787 asection **psec,
4788 bfd_boolean newdef,
4789 bfd_boolean olddef,
4790 bfd *oldbfd,
4791 const asection *oldsec)
4792 {
4793 /* A normal common symbol and a large common symbol result in a
4794 normal common symbol. We turn the large common symbol into a
4795 normal one. */
4796 if (!olddef
4797 && h->root.type == bfd_link_hash_common
4798 && !newdef
4799 && bfd_is_com_section (*psec)
4800 && oldsec != *psec)
4801 {
4802 if (sym->st_shndx == SHN_COMMON
4803 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4804 {
4805 h->root.u.c.p->section
4806 = bfd_make_section_old_way (oldbfd, "COMMON");
4807 h->root.u.c.p->section->flags = SEC_ALLOC;
4808 }
4809 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4810 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4811 *psec = bfd_com_section_ptr;
4812 }
4813
4814 return TRUE;
4815 }
4816
4817 static int
4818 elf_x86_64_additional_program_headers (bfd *abfd,
4819 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4820 {
4821 asection *s;
4822 int count = 0;
4823
4824 /* Check to see if we need a large readonly segment. */
4825 s = bfd_get_section_by_name (abfd, ".lrodata");
4826 if (s && (s->flags & SEC_LOAD))
4827 count++;
4828
4829 /* Check to see if we need a large data segment. Since the .lbss
4830 section is placed right after the .bss section, there should be no
4831 need for a large data segment just because of .lbss. */
4832 s = bfd_get_section_by_name (abfd, ".ldata");
4833 if (s && (s->flags & SEC_LOAD))
4834 count++;
4835
4836 return count;
4837 }
4838
4839 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4840
4841 static bfd_boolean
4842 elf_x86_64_relocs_compatible (const bfd_target *input,
4843 const bfd_target *output)
4844 {
4845 return ((xvec_get_elf_backend_data (input)->s->elfclass
4846 == xvec_get_elf_backend_data (output)->s->elfclass)
4847 && _bfd_elf_relocs_compatible (input, output));
4848 }
4849
4850 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4851 with GNU properties if found. Otherwise, return NULL. */
4852
4853 static bfd *
4854 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4855 {
4856 struct elf_x86_init_table init_table;
4857
4858 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4859 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4860 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4861 != (int) R_X86_64_GNU_VTINHERIT)
4862 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4863 != (int) R_X86_64_GNU_VTENTRY))
4864 abort ();
4865
4866 /* This is unused for x86-64. */
4867 init_table.plt0_pad_byte = 0x90;
4868
4869 if (get_elf_x86_backend_data (info->output_bfd)->target_os
4870 == is_normal)
4871 {
4872 if (info->bndplt)
4873 {
4874 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4875 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4876 }
4877 else
4878 {
4879 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4880 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4881 }
4882
4883 if (ABI_64_P (info->output_bfd))
4884 {
4885 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4886 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4887 }
4888 else
4889 {
4890 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4891 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4892 }
4893 }
4894 else
4895 {
4896 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4897 init_table.non_lazy_plt = NULL;
4898 init_table.lazy_ibt_plt = NULL;
4899 init_table.non_lazy_ibt_plt = NULL;
4900 }
4901
4902 if (ABI_64_P (info->output_bfd))
4903 {
4904 init_table.r_info = elf64_r_info;
4905 init_table.r_sym = elf64_r_sym;
4906 }
4907 else
4908 {
4909 init_table.r_info = elf32_r_info;
4910 init_table.r_sym = elf32_r_sym;
4911 }
4912
4913 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4914 }
4915
4916 static const struct bfd_elf_special_section
4917 elf_x86_64_special_sections[]=
4918 {
4919 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4920 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4921 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4922 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4923 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4924 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4925 { NULL, 0, 0, 0, 0 }
4926 };
4927
4928 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4929 #define TARGET_LITTLE_NAME "elf64-x86-64"
4930 #define ELF_ARCH bfd_arch_i386
4931 #define ELF_TARGET_ID X86_64_ELF_DATA
4932 #define ELF_MACHINE_CODE EM_X86_64
4933 #define ELF_MAXPAGESIZE 0x200000
4934 #define ELF_MINPAGESIZE 0x1000
4935 #define ELF_COMMONPAGESIZE 0x1000
4936
4937 #define elf_backend_can_gc_sections 1
4938 #define elf_backend_can_refcount 1
4939 #define elf_backend_want_got_plt 1
4940 #define elf_backend_plt_readonly 1
4941 #define elf_backend_want_plt_sym 0
4942 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
4943 #define elf_backend_rela_normal 1
4944 #define elf_backend_plt_alignment 4
4945 #define elf_backend_extern_protected_data 1
4946 #define elf_backend_caches_rawsize 1
4947 #define elf_backend_dtrel_excludes_plt 1
4948 #define elf_backend_want_dynrelro 1
4949
4950 #define elf_info_to_howto elf_x86_64_info_to_howto
4951
4952 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4953 #define bfd_elf64_bfd_reloc_name_lookup \
4954 elf_x86_64_reloc_name_lookup
4955
4956 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4957 #define elf_backend_check_relocs elf_x86_64_check_relocs
4958 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4959 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4960 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4961 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4962 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4963 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4964 #ifdef CORE_HEADER
4965 #define elf_backend_write_core_note elf_x86_64_write_core_note
4966 #endif
4967 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4968 #define elf_backend_relocate_section elf_x86_64_relocate_section
4969 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4970 #define elf_backend_object_p elf64_x86_64_elf_object_p
4971 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4972
4973 #define elf_backend_section_from_shdr \
4974 elf_x86_64_section_from_shdr
4975
4976 #define elf_backend_section_from_bfd_section \
4977 elf_x86_64_elf_section_from_bfd_section
4978 #define elf_backend_add_symbol_hook \
4979 elf_x86_64_add_symbol_hook
4980 #define elf_backend_symbol_processing \
4981 elf_x86_64_symbol_processing
4982 #define elf_backend_common_section_index \
4983 elf_x86_64_common_section_index
4984 #define elf_backend_common_section \
4985 elf_x86_64_common_section
4986 #define elf_backend_common_definition \
4987 elf_x86_64_common_definition
4988 #define elf_backend_merge_symbol \
4989 elf_x86_64_merge_symbol
4990 #define elf_backend_special_sections \
4991 elf_x86_64_special_sections
4992 #define elf_backend_additional_program_headers \
4993 elf_x86_64_additional_program_headers
4994 #define elf_backend_setup_gnu_properties \
4995 elf_x86_64_link_setup_gnu_properties
4996 #define elf_backend_hide_symbol \
4997 _bfd_x86_elf_hide_symbol
4998
4999 #include "elf64-target.h"
5000
5001 /* CloudABI support. */
5002
5003 #undef TARGET_LITTLE_SYM
5004 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5005 #undef TARGET_LITTLE_NAME
5006 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5007
5008 #undef ELF_OSABI
5009 #define ELF_OSABI ELFOSABI_CLOUDABI
5010
5011 #undef elf64_bed
5012 #define elf64_bed elf64_x86_64_cloudabi_bed
5013
5014 #include "elf64-target.h"
5015
5016 /* FreeBSD support. */
5017
5018 #undef TARGET_LITTLE_SYM
5019 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5020 #undef TARGET_LITTLE_NAME
5021 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5022
5023 #undef ELF_OSABI
5024 #define ELF_OSABI ELFOSABI_FREEBSD
5025
5026 #undef elf64_bed
5027 #define elf64_bed elf64_x86_64_fbsd_bed
5028
5029 #include "elf64-target.h"
5030
5031 /* Solaris 2 support. */
5032
5033 #undef TARGET_LITTLE_SYM
5034 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5035 #undef TARGET_LITTLE_NAME
5036 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5037
5038 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5039 objects won't be recognized. */
5040 #undef ELF_OSABI
5041
5042 #undef elf64_bed
5043 #define elf64_bed elf64_x86_64_sol2_bed
5044
5045 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5046 boundary. */
5047 #undef elf_backend_static_tls_alignment
5048 #define elf_backend_static_tls_alignment 16
5049
5050 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5051
5052 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5053 File, p.63. */
5054 #undef elf_backend_want_plt_sym
5055 #define elf_backend_want_plt_sym 1
5056
5057 #undef elf_backend_strtab_flags
5058 #define elf_backend_strtab_flags SHF_STRINGS
5059
5060 static bfd_boolean
5061 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5062 bfd *obfd ATTRIBUTE_UNUSED,
5063 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5064 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5065 {
5066 /* PR 19938: FIXME: Need to add code for setting the sh_info
5067 and sh_link fields of Solaris specific section types. */
5068 return FALSE;
5069 }
5070
5071 #undef elf_backend_copy_special_section_fields
5072 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5073
5074 #include "elf64-target.h"
5075
5076 /* Native Client support. */
5077
5078 static bfd_boolean
5079 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5080 {
5081 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5082 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5083 return TRUE;
5084 }
5085
5086 #undef TARGET_LITTLE_SYM
5087 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5088 #undef TARGET_LITTLE_NAME
5089 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5090 #undef elf64_bed
5091 #define elf64_bed elf64_x86_64_nacl_bed
5092
5093 #undef ELF_MAXPAGESIZE
5094 #undef ELF_MINPAGESIZE
5095 #undef ELF_COMMONPAGESIZE
5096 #define ELF_MAXPAGESIZE 0x10000
5097 #define ELF_MINPAGESIZE 0x10000
5098 #define ELF_COMMONPAGESIZE 0x10000
5099
5100 /* Restore defaults. */
5101 #undef ELF_OSABI
5102 #undef elf_backend_static_tls_alignment
5103 #undef elf_backend_want_plt_sym
5104 #define elf_backend_want_plt_sym 0
5105 #undef elf_backend_strtab_flags
5106 #undef elf_backend_copy_special_section_fields
5107
5108 /* NaCl uses substantially different PLT entries for the same effects. */
5109
5110 #undef elf_backend_plt_alignment
5111 #define elf_backend_plt_alignment 5
5112 #define NACL_PLT_ENTRY_SIZE 64
5113 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5114
5115 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5116 {
5117 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5118 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5119 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5120 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5121 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5122
5123 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5124 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5125
5126 /* 32 bytes of nop to pad out to the standard size. */
5127 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5128 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5129 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5130 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5131 0x66, /* excess data16 prefix */
5132 0x90 /* nop */
5133 };
5134
5135 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5136 {
5137 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5138 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5139 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5140 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5141
5142 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5143 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5144 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5145
5146 /* Lazy GOT entries point here (32-byte aligned). */
5147 0x68, /* pushq immediate */
5148 0, 0, 0, 0, /* replaced with index into relocation table. */
5149 0xe9, /* jmp relative */
5150 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5151
5152 /* 22 bytes of nop to pad out to the standard size. */
5153 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5154 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5155 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5156 };
5157
5158 /* .eh_frame covering the .plt section. */
5159
5160 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5161 {
5162 #if (PLT_CIE_LENGTH != 20 \
5163 || PLT_FDE_LENGTH != 36 \
5164 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5165 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5166 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5167 #endif
5168 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5169 0, 0, 0, 0, /* CIE ID */
5170 1, /* CIE version */
5171 'z', 'R', 0, /* Augmentation string */
5172 1, /* Code alignment factor */
5173 0x78, /* Data alignment factor */
5174 16, /* Return address column */
5175 1, /* Augmentation size */
5176 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5177 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5178 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5179 DW_CFA_nop, DW_CFA_nop,
5180
5181 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5182 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5183 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5184 0, 0, 0, 0, /* .plt size goes here */
5185 0, /* Augmentation size */
5186 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5187 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5188 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5189 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5190 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5191 13, /* Block length */
5192 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5193 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5194 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5195 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5196 DW_CFA_nop, DW_CFA_nop
5197 };
5198
5199 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5200 {
5201 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5202 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5203 elf_x86_64_nacl_plt_entry, /* plt_entry */
5204 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5205 2, /* plt0_got1_offset */
5206 9, /* plt0_got2_offset */
5207 13, /* plt0_got2_insn_end */
5208 3, /* plt_got_offset */
5209 33, /* plt_reloc_offset */
5210 38, /* plt_plt_offset */
5211 7, /* plt_got_insn_size */
5212 42, /* plt_plt_insn_end */
5213 32, /* plt_lazy_offset */
5214 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5215 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5216 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5217 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5218 };
5219
5220 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5221 {
5222 is_nacl /* os */
5223 };
5224
5225 #undef elf_backend_arch_data
5226 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5227
5228 #undef elf_backend_object_p
5229 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5230 #undef elf_backend_modify_segment_map
5231 #define elf_backend_modify_segment_map nacl_modify_segment_map
5232 #undef elf_backend_modify_program_headers
5233 #define elf_backend_modify_program_headers nacl_modify_program_headers
5234 #undef elf_backend_final_write_processing
5235 #define elf_backend_final_write_processing nacl_final_write_processing
5236
5237 #include "elf64-target.h"
5238
5239 /* Native Client x32 support. */
5240
5241 static bfd_boolean
5242 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5243 {
5244 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5245 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5246 return TRUE;
5247 }
5248
5249 #undef TARGET_LITTLE_SYM
5250 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5251 #undef TARGET_LITTLE_NAME
5252 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5253 #undef elf32_bed
5254 #define elf32_bed elf32_x86_64_nacl_bed
5255
5256 #define bfd_elf32_bfd_reloc_type_lookup \
5257 elf_x86_64_reloc_type_lookup
5258 #define bfd_elf32_bfd_reloc_name_lookup \
5259 elf_x86_64_reloc_name_lookup
5260 #define bfd_elf32_get_synthetic_symtab \
5261 elf_x86_64_get_synthetic_symtab
5262
5263 #undef elf_backend_object_p
5264 #define elf_backend_object_p \
5265 elf32_x86_64_nacl_elf_object_p
5266
5267 #undef elf_backend_bfd_from_remote_memory
5268 #define elf_backend_bfd_from_remote_memory \
5269 _bfd_elf32_bfd_from_remote_memory
5270
5271 #undef elf_backend_size_info
5272 #define elf_backend_size_info \
5273 _bfd_elf32_size_info
5274
5275 #include "elf32-target.h"
5276
5277 /* Restore defaults. */
5278 #undef elf_backend_object_p
5279 #define elf_backend_object_p elf64_x86_64_elf_object_p
5280 #undef elf_backend_bfd_from_remote_memory
5281 #undef elf_backend_size_info
5282 #undef elf_backend_modify_segment_map
5283 #undef elf_backend_modify_program_headers
5284 #undef elf_backend_final_write_processing
5285
5286 /* Intel L1OM support. */
5287
5288 static bfd_boolean
5289 elf64_l1om_elf_object_p (bfd *abfd)
5290 {
5291 /* Set the right machine number for an L1OM elf64 file. */
5292 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5293 return TRUE;
5294 }
5295
5296 #undef TARGET_LITTLE_SYM
5297 #define TARGET_LITTLE_SYM l1om_elf64_vec
5298 #undef TARGET_LITTLE_NAME
5299 #define TARGET_LITTLE_NAME "elf64-l1om"
5300 #undef ELF_ARCH
5301 #define ELF_ARCH bfd_arch_l1om
5302
5303 #undef ELF_MACHINE_CODE
5304 #define ELF_MACHINE_CODE EM_L1OM
5305
5306 #undef ELF_OSABI
5307
5308 #undef elf64_bed
5309 #define elf64_bed elf64_l1om_bed
5310
5311 #undef elf_backend_object_p
5312 #define elf_backend_object_p elf64_l1om_elf_object_p
5313
5314 /* Restore defaults. */
5315 #undef ELF_MAXPAGESIZE
5316 #undef ELF_MINPAGESIZE
5317 #undef ELF_COMMONPAGESIZE
5318 #define ELF_MAXPAGESIZE 0x200000
5319 #define ELF_MINPAGESIZE 0x1000
5320 #define ELF_COMMONPAGESIZE 0x1000
5321 #undef elf_backend_plt_alignment
5322 #define elf_backend_plt_alignment 4
5323 #undef elf_backend_arch_data
5324 #define elf_backend_arch_data &elf_x86_64_arch_bed
5325
5326 #include "elf64-target.h"
5327
5328 /* FreeBSD L1OM support. */
5329
5330 #undef TARGET_LITTLE_SYM
5331 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5332 #undef TARGET_LITTLE_NAME
5333 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5334
5335 #undef ELF_OSABI
5336 #define ELF_OSABI ELFOSABI_FREEBSD
5337
5338 #undef elf64_bed
5339 #define elf64_bed elf64_l1om_fbsd_bed
5340
5341 #include "elf64-target.h"
5342
5343 /* Intel K1OM support. */
5344
5345 static bfd_boolean
5346 elf64_k1om_elf_object_p (bfd *abfd)
5347 {
5348 /* Set the right machine number for a K1OM elf64 file. */
5349 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5350 return TRUE;
5351 }
5352
5353 #undef TARGET_LITTLE_SYM
5354 #define TARGET_LITTLE_SYM k1om_elf64_vec
5355 #undef TARGET_LITTLE_NAME
5356 #define TARGET_LITTLE_NAME "elf64-k1om"
5357 #undef ELF_ARCH
5358 #define ELF_ARCH bfd_arch_k1om
5359
5360 #undef ELF_MACHINE_CODE
5361 #define ELF_MACHINE_CODE EM_K1OM
5362
5363 #undef ELF_OSABI
5364
5365 #undef elf64_bed
5366 #define elf64_bed elf64_k1om_bed
5367
5368 #undef elf_backend_object_p
5369 #define elf_backend_object_p elf64_k1om_elf_object_p
5370
5371 #undef elf_backend_static_tls_alignment
5372
5373 #undef elf_backend_want_plt_sym
5374 #define elf_backend_want_plt_sym 0
5375
5376 #include "elf64-target.h"
5377
5378 /* FreeBSD K1OM support. */
5379
5380 #undef TARGET_LITTLE_SYM
5381 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5382 #undef TARGET_LITTLE_NAME
5383 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5384
5385 #undef ELF_OSABI
5386 #define ELF_OSABI ELFOSABI_FREEBSD
5387
5388 #undef elf64_bed
5389 #define elf64_bed elf64_k1om_fbsd_bed
5390
5391 #include "elf64-target.h"
5392
5393 /* 32bit x86-64 support. */
5394
5395 #undef TARGET_LITTLE_SYM
5396 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5397 #undef TARGET_LITTLE_NAME
5398 #define TARGET_LITTLE_NAME "elf32-x86-64"
5399 #undef elf32_bed
5400
5401 #undef ELF_ARCH
5402 #define ELF_ARCH bfd_arch_i386
5403
5404 #undef ELF_MACHINE_CODE
5405 #define ELF_MACHINE_CODE EM_X86_64
5406
5407 #undef ELF_OSABI
5408
5409 #undef elf_backend_object_p
5410 #define elf_backend_object_p \
5411 elf32_x86_64_elf_object_p
5412
5413 #undef elf_backend_bfd_from_remote_memory
5414 #define elf_backend_bfd_from_remote_memory \
5415 _bfd_elf32_bfd_from_remote_memory
5416
5417 #undef elf_backend_size_info
5418 #define elf_backend_size_info \
5419 _bfd_elf32_size_info
5420
5421 #include "elf32-target.h"