x86-64: Don't mask out R_X86_64_converted_reloc_bit
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since 32-bit and 64-bit x86-64 encode the relocation type in the
 39    same way, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
 40    relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
 41    since they are the same.  */
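/* A minimal illustration, assuming the generic definitions
   ELF32_R_TYPE (i) == ((i) & 0xff) and
   ELF64_R_TYPE (i) == ((i) & 0xffffffff):

     r_info = ELF64_R_INFO (sym, R_X86_64_PC32);

   puts the symbol index in the upper 32 bits and the type (a small
   value) in the low bits, so ELF32_R_TYPE (r_info) and
   ELF64_R_TYPE (r_info) agree.  Every type used by this backend, even
   with R_X86_64_converted_reloc_bit (0x80) ORed in, fits in 8 bits.  */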
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
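/* Illustrative sketch of how the bit flows through this file (see
   elf_x86_64_convert_load_reloc and elf_x86_64_check_tls_transition
   below): a converted GOTPCREL relocation is re-tagged with

     irel->r_info = htab->r_info (r_symndx,
				  r_type | R_X86_64_converted_reloc_bit);

   and consumers that need the plain type mask the bit back out with

     r_type = ELF32_R_TYPE (rel->r_info) & ~R_X86_64_converted_reloc_bit;

   The bit only marks in-memory relocations that have already been
   converted; it is not a relocation type of its own.  */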
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
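/* A worked example of the index computation above (the values follow
   from the table layout and are illustrative):

     R_X86_64_standard == R_X86_64_REX_GOTPCRELX + 1, the 43 rows up to
     the gap (indices 0..42);

     elf_x86_64_rtype_to_howto (abfd, R_X86_64_GNU_VTENTRY)
       -> i = R_X86_64_GNU_VTENTRY - R_X86_64_vt_offset
	    = R_X86_64_standard + 1, the R_X86_64_GNU_VTENTRY row;

     elf_x86_64_rtype_to_howto (x32_abfd, R_X86_64_32)
       -> i = ARRAY_SIZE (x86_64_elf_howto_table) - 1, the trailing
	  complain_overflow_bitfield copy of R_X86_64_32.  */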
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
 366       case 296:	/* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
 379       case 336:	/* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
 426   /* Note that for some reason, a spurious space is tacked onto
 427      the end of the args in some implementations (at least one,
 428      anyway), so strip it off if it exists.  */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
546 works. */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
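/* For reference, a resolved lazy PLT entry for the first slot typically
   ends up looking like this (displacements are illustrative and are
   filled in when the PLT is laid out):

     ff 25 xx xx xx xx	jmpq   *name@GOTPCREL(%rip)  # name's GOT slot
     68 00 00 00 00	pushq  $0x0		     # index into .rela.plt
     e9 xx xx xx xx	jmpq   <PLT0>		     # fall back to plt0

   Before the symbol is resolved, the GOT slot points at the pushq, so
   the first jmpq lands on the lazy-binding path through PLT0.  */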
566
567 /* The first entry in a lazy procedure linkage table with BND prefix
 568    looks like this.  */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
 577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
 587 /* The first entry in the IBT-enabled lazy procedure linkage table is
 588    the same as the lazy PLT with BND prefix, so that bound registers are
 589    preserved when control is passed to the dynamic linker.  Subsequent
 590    entries for an IBT-enabled lazy procedure linkage table look like
591 this. */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
 613 /* Entries in the non-lazy procedure linkage table look like this.  */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
 622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
 632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
 644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
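/* The DW_CFA_def_cfa_expression above computes, roughly,

     CFA = %rsp + 8 + ((((%rip & 15) >= 11) ? 1 : 0) << 3)

   i.e. the usual %rsp + 8, except at offsets 11..15 within a 16-byte
   PLT entry, after the pushq has added a word to the stack, where it
   becomes %rsp + 16.  The BND and IBT variants below differ only in
   the offset tested (DW_OP_lit5, DW_OP_lit10 and DW_OP_lit9).  */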
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
 726 /* .eh_frame covering the lazy .plt section with IBT enabled.  */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
 761 /* .eh_frame covering the x32 lazy .plt section with IBT enabled.  */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
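/* No CFA expression is needed here: a non-lazy PLT entry is a single
   indirect jmp and never pushes anything, so the CIE's initial rule
   (CFA = %rsp + 8) holds throughout and the FDE body is just nops.  */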
822
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
825 {
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
843 };
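/* Sketch of how these offsets line up with elf_x86_64_lazy_plt_entry
   above (byte positions within the 16-byte entry):

     bytes  0..1  ff 25   plt_got_offset   = 2	 GOT displacement at 2..5
     byte   6	  68	  plt_reloc_offset = 7	 reloc index at 7..10
     byte  11	  e9	  plt_plt_offset   = 12	 branch to PLT0 at 12..15

   plt0_got1_offset and plt0_got2_offset likewise name the displacement
   fields of the two instructions in plt0_entry.  */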
844
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
846 {
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
854 };
855
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
857 {
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
878 {
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
886 };
887
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
889 {
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
928 };
929
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
931 {
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
939 };
940
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
942 {
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
953 {
954 is_normal /* os */
955 };
956
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
958
959 static bfd_boolean
960 elf64_x86_64_elf_object_p (bfd *abfd)
961 {
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
964 return TRUE;
965 }
966
967 static bfd_boolean
968 elf32_x86_64_elf_object_p (bfd *abfd)
969 {
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
972 return TRUE;
973 }
974
 975 /* Return TRUE if the TLS access code sequence supports transition
976 from R_TYPE. */
977
978 static bfd_boolean
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
981 asection *sec,
982 bfd_byte *contents,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
985 unsigned int r_type,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
988 {
989 unsigned int val;
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
993 bfd_vma offset;
994 struct elf_x86_link_hash_table *htab;
995 bfd_byte *call;
996 bfd_boolean indirect_call;
997
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1000 switch (r_type)
1001 {
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1005 return FALSE;
1006
1007 if (r_type == R_X86_64_TLSGD)
1008 {
1009 /* Check transition from GD access model. For 64bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1012 or
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .byte 0x66; rex64
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
 1018	     can transition to a different access model.  For 32bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1021 or
1022 leaq foo@tlsgd(%rip), %rdi
1023 .byte 0x66; rex64
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
 1027	     can transition to a different access model.  For largepic,
1028 we also support:
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
 1031	     addq %r15, %rax
1032 call *%rax
1033 or
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
 1036	     addq %rbx, %rax
1037 call *%rax */
1038
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1040
1041 if ((offset + 12) > sec->size)
1042 return FALSE;
1043
1044 call = contents + offset + 4;
1045 if (call[0] != 0x66
1046 || !((call[1] == 0x48
1047 && call[2] == 0xff
1048 && call[3] == 0x15)
1049 || (call[1] == 0x48
1050 && call[2] == 0x67
1051 && call[3] == 0xe8)
1052 || (call[1] == 0x66
1053 && call[2] == 0x48
1054 && call[3] == 0xe8)))
1055 {
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1058 || offset < 3
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1061 || call[11] != 0x01
1062 || call[13] != 0xff
1063 || call[14] != 0xd0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1066 return FALSE;
1067 largepic = TRUE;
1068 }
1069 else if (ABI_64_P (abfd))
1070 {
1071 if (offset < 4
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1073 return FALSE;
1074 }
1075 else
1076 {
1077 if (offset < 3
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1079 return FALSE;
1080 }
1081 indirect_call = call[2] == 0xff;
1082 }
1083 else
1084 {
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1088 or
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
 1093	     can transition to a different access model.  For largepic
1094 we also support:
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
 1097	     addq %r15, %rax
1098 call *%rax
1099 or
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
 1102	     addq %rbx, %rax
1103 call *%rax */
1104
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1106
1107 if (offset < 3 || (offset + 9) > sec->size)
1108 return FALSE;
1109
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1111 return FALSE;
1112
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1117 {
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1121 || call[11] != 0x01
1122 || call[13] != 0xff
1123 || call[14] != 0xd0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1126 return FALSE;
1127 largepic = TRUE;
1128 }
1129 indirect_call = call[0] == 0xff;
1130 }
1131
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1134 return FALSE;
1135
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1137 if (h == NULL
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1139 return FALSE;
1140 else
1141 {
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1144 if (largepic)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1148 else
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1150 }
1151
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1156 */
1157
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1160 {
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1163 {
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1166 return FALSE;
1167 }
1168 }
1169 else
1170 {
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 if (offset < 2 || (offset + 3) > sec->size)
1175 return FALSE;
1176 }
1177
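      /* Byte patterns being matched below (illustrative LP64 encodings;
	 x32 may drop or change the REX prefix):

	   48 8b 05 xx xx xx xx	  movq foo@gottpoff(%rip), %rax
	   48 03 05 xx xx xx xx	  addq foo@gottpoff(%rip), %rax

	 0x8b/0x03 is the opcode, and (modrm & 0xc7) == 5 selects the
	 mod=00, r/m=101 RIP-relative form with any destination register
	 in the reg field.  */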
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1180 return FALSE;
1181
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1184
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1188
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1191 going to be rax. */
1192
1193 if (offset < 3 || (offset + 4) > sec->size)
1194 return FALSE;
1195
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1198 return FALSE;
1199
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1205
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1209 */
1210 if (offset + 2 <= sec->size)
1211 {
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1215 }
1216
1217 return FALSE;
1218
1219 default:
1220 abort ();
1221 }
1222 }
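/* For reference, the transitions validated above are performed later in
   elf_x86_64_relocate_section.  The rewritten sequences look roughly
   like this (LP64; instruction sizes are preserved with padding):

     GD -> LE:	movq %fs:0, %rax;  leaq foo@tpoff(%rax), %rax
     GD -> IE:	movq %fs:0, %rax;  addq foo@gottpoff(%rip), %rax
     LD -> LE:	the __tls_get_addr call becomes movq %fs:0, %rax,
		padded with prefix bytes
     IE -> LE:	movq foo@gottpoff(%rip), %reg  ->  movq $foo@tpoff, %reg  */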
1223
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
1226
1227 static bfd_boolean
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1238 {
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1242
1243 /* Skip TLS transition for functions. */
1244 if (h != NULL
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1247 return TRUE;
1248
1249 switch (from_type)
1250 {
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1256 {
1257 if (h == NULL)
1258 to_type = R_X86_64_TPOFF32;
1259 else
1260 to_type = R_X86_64_GOTTPOFF;
1261 }
1262
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1266 {
1267 unsigned int new_to_type = to_type;
1268
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1271
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1275 {
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1278 }
1279
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1285 }
1286
1287 break;
1288
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1292 break;
1293
1294 default:
1295 return TRUE;
1296 }
1297
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1300 return TRUE;
1301
1302 /* Check if the transition can be performed. */
1303 if (check
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1307 {
1308 reloc_howto_type *from, *to;
1309 const char *name;
1310
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1313
1314 if (from == NULL || to == NULL)
1315 return FALSE;
1316
1317 if (h)
1318 name = h->root.root.string;
1319 else
1320 {
1321 struct elf_x86_link_hash_table *htab;
1322
1323 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1324 if (htab == NULL)
1325 name = "*unknown*";
1326 else
1327 {
1328 Elf_Internal_Sym *isym;
1329
1330 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1331 abfd, r_symndx);
1332 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1333 }
1334 }
1335
1336 _bfd_error_handler
1337 /* xgettext:c-format */
1338 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1339 " in section `%pA' failed"),
1340 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1341 bfd_set_error (bfd_error_bad_value);
1342 return FALSE;
1343 }
1344
1345 *r_type = to_type;
1346 return TRUE;
1347 }
1348
1349 /* Rename some of the generic section flags to better document how they
1350 are used here. */
1351 #define check_relocs_failed sec_flg0
1352
1353 static bfd_boolean
1354 elf_x86_64_need_pic (struct bfd_link_info *info,
1355 bfd *input_bfd, asection *sec,
1356 struct elf_link_hash_entry *h,
1357 Elf_Internal_Shdr *symtab_hdr,
1358 Elf_Internal_Sym *isym,
1359 reloc_howto_type *howto)
1360 {
1361 const char *v = "";
1362 const char *und = "";
1363 const char *pic = "";
1364 const char *object;
1365
1366 const char *name;
1367 if (h)
1368 {
1369 name = h->root.root.string;
1370 switch (ELF_ST_VISIBILITY (h->other))
1371 {
1372 case STV_HIDDEN:
1373 v = _("hidden symbol ");
1374 break;
1375 case STV_INTERNAL:
1376 v = _("internal symbol ");
1377 break;
1378 case STV_PROTECTED:
1379 v = _("protected symbol ");
1380 break;
1381 default:
1382 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1383 v = _("protected symbol ");
1384 else
1385 v = _("symbol ");
1386 pic = _("; recompile with -fPIC");
1387 break;
1388 }
1389
1390 if (!h->def_regular && !h->def_dynamic)
1391 und = _("undefined ");
1392 }
1393 else
1394 {
1395 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1396 pic = _("; recompile with -fPIC");
1397 }
1398
1399 if (bfd_link_dll (info))
1400 object = _("a shared object");
1401 else if (bfd_link_pie (info))
1402 object = _("a PIE object");
1403 else
1404 object = _("a PDE object");
1405
1406 /* xgettext:c-format */
1407 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1408 "not be used when making %s%s"),
1409 input_bfd, howto->name, und, v, name,
1410 object, pic);
1411 bfd_set_error (bfd_error_bad_value);
1412 sec->check_relocs_failed = 1;
1413 return FALSE;
1414 }
1415
1416 /* With the local symbol, foo, we convert
1417 mov foo@GOTPCREL(%rip), %reg
1418 to
1419 lea foo(%rip), %reg
1420 and convert
1421 call/jmp *foo@GOTPCREL(%rip)
1422 to
1423 nop call foo/jmp foo nop
1424 When PIC is false, convert
1425 test %reg, foo@GOTPCREL(%rip)
1426 to
1427 test $foo, %reg
1428 and convert
1429 binop foo@GOTPCREL(%rip), %reg
1430 to
1431 binop $foo, %reg
1432 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1433 instructions. */
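/* Byte-level sketch of these conversions (LP64 encodings, %rax chosen
   arbitrarily; the nop byte used for the call form is configurable):

     48 8b 05 <disp32>	 mov foo@GOTPCREL(%rip), %rax
       -> 48 8d 05 <disp32>   lea foo(%rip), %rax	(R_X86_64_PC32)
       -> 48 c7 c0 <imm32>    mov $foo, %rax		(R_X86_64_32S)

     ff 25 <disp32>	 jmp *foo@GOTPCREL(%rip)
       -> e9 <disp32> 90      jmp foo; nop		(R_X86_64_PC32)

     ff 15 <disp32>	 call *foo@GOTPCREL(%rip)
       -> 67 e8 <disp32>      addr32 call foo		(R_X86_64_PC32)

     48 85 05 <disp32>	 test %rax, foo@GOTPCREL(%rip)
       -> 48 f7 c0 <imm32>    test $foo, %rax		(R_X86_64_32S)

   Which rewrite of the mov is chosen depends on to_reloc_pc32 in the
   code below.  */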
1434
1435 static bfd_boolean
1436 elf_x86_64_convert_load_reloc (bfd *abfd,
1437 bfd_byte *contents,
1438 unsigned int *r_type_p,
1439 Elf_Internal_Rela *irel,
1440 struct elf_link_hash_entry *h,
1441 bfd_boolean *converted,
1442 struct bfd_link_info *link_info)
1443 {
1444 struct elf_x86_link_hash_table *htab;
1445 bfd_boolean is_pic;
1446 bfd_boolean no_overflow;
1447 bfd_boolean relocx;
1448 bfd_boolean to_reloc_pc32;
1449 asection *tsec;
1450 bfd_signed_vma raddend;
1451 unsigned int opcode;
1452 unsigned int modrm;
1453 unsigned int r_type = *r_type_p;
1454 unsigned int r_symndx;
1455 bfd_vma roff = irel->r_offset;
1456
1457 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1458 return TRUE;
1459
1460 raddend = irel->r_addend;
1461 /* Addend for 32-bit PC-relative relocation must be -4. */
1462 if (raddend != -4)
1463 return TRUE;
1464
1465 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1466 is_pic = bfd_link_pic (link_info);
1467
1468 relocx = (r_type == R_X86_64_GOTPCRELX
1469 || r_type == R_X86_64_REX_GOTPCRELX);
1470
1471 /* TRUE if --no-relax is used. */
1472 no_overflow = link_info->disable_target_specific_optimizations > 1;
1473
1474 r_symndx = htab->r_sym (irel->r_info);
1475
1476 opcode = bfd_get_8 (abfd, contents + roff - 2);
1477
 1478   /* Convert mov to lea even for R_X86_64_GOTPCREL; this has long been done.  */
1479 if (opcode != 0x8b)
1480 {
1481 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1482 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1483 test, xor instructions. */
1484 if (!relocx)
1485 return TRUE;
1486 }
1487
1488 /* We convert only to R_X86_64_PC32:
1489 1. Branch.
1490 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1491 3. no_overflow is true.
1492 4. PIC.
1493 */
1494 to_reloc_pc32 = (opcode == 0xff
1495 || !relocx
1496 || no_overflow
1497 || is_pic);
1498
1499 /* Get the symbol referred to by the reloc. */
1500 if (h == NULL)
1501 {
1502 Elf_Internal_Sym *isym
1503 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1504
1505 /* Skip relocation against undefined symbols. */
1506 if (isym->st_shndx == SHN_UNDEF)
1507 return TRUE;
1508
1509 if (isym->st_shndx == SHN_ABS)
1510 tsec = bfd_abs_section_ptr;
1511 else if (isym->st_shndx == SHN_COMMON)
1512 tsec = bfd_com_section_ptr;
1513 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1514 tsec = &_bfd_elf_large_com_section;
1515 else
1516 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1517 }
1518 else
1519 {
 1520	  /* An undefined weak symbol is only bound locally in an executable,
 1521	     and its reference is resolved as 0 without relocation
 1522	     overflow.  We can only perform this optimization for
 1523	     GOTPCRELX relocations since we need to modify the REX byte.
 1524	     It is OK to convert mov with R_X86_64_GOTPCREL to
 1525	     R_X86_64_PC32.  */
1526 bfd_boolean local_ref;
1527 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1528
1529 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1530 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1531 if ((relocx || opcode == 0x8b)
1532 && (h->root.type == bfd_link_hash_undefweak
1533 && !eh->linker_def
1534 && local_ref))
1535 {
1536 if (opcode == 0xff)
1537 {
1538 /* Skip for branch instructions since R_X86_64_PC32
1539 may overflow. */
1540 if (no_overflow)
1541 return TRUE;
1542 }
1543 else if (relocx)
1544 {
1545 /* For non-branch instructions, we can convert to
1546 R_X86_64_32/R_X86_64_32S since we know if there
1547 is a REX byte. */
1548 to_reloc_pc32 = FALSE;
1549 }
1550
1551 /* Since we don't know the current PC when PIC is true,
1552 we can't convert to R_X86_64_PC32. */
1553 if (to_reloc_pc32 && is_pic)
1554 return TRUE;
1555
1556 goto convert;
1557 }
 1558       /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1559 ld.so may use its link-time address. */
1560 else if (h->start_stop
1561 || eh->linker_def
1562 || ((h->def_regular
1563 || h->root.type == bfd_link_hash_defined
1564 || h->root.type == bfd_link_hash_defweak)
1565 && h != htab->elf.hdynamic
1566 && local_ref))
1567 {
1568 /* bfd_link_hash_new or bfd_link_hash_undefined is
1569 set by an assignment in a linker script in
1570 bfd_elf_record_link_assignment. start_stop is set
1571 on __start_SECNAME/__stop_SECNAME which mark section
1572 SECNAME. */
1573 if (h->start_stop
1574 || eh->linker_def
1575 || (h->def_regular
1576 && (h->root.type == bfd_link_hash_new
1577 || h->root.type == bfd_link_hash_undefined
1578 || ((h->root.type == bfd_link_hash_defined
1579 || h->root.type == bfd_link_hash_defweak)
1580 && h->root.u.def.section == bfd_und_section_ptr))))
1581 {
1582 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1583 if (no_overflow)
1584 return TRUE;
1585 goto convert;
1586 }
1587 tsec = h->root.u.def.section;
1588 }
1589 else
1590 return TRUE;
1591 }
1592
 1593   /* Don't convert a GOTPCREL relocation against a large section.  */
1594 if (elf_section_data (tsec) != NULL
1595 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1596 return TRUE;
1597
1598 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1599 if (no_overflow)
1600 return TRUE;
1601
1602 convert:
1603 if (opcode == 0xff)
1604 {
1605 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1606 unsigned int nop;
1607 unsigned int disp;
1608 bfd_vma nop_offset;
1609
1610 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1611 R_X86_64_PC32. */
1612 modrm = bfd_get_8 (abfd, contents + roff - 1);
1613 if (modrm == 0x25)
1614 {
1615 /* Convert to "jmp foo nop". */
1616 modrm = 0xe9;
1617 nop = NOP_OPCODE;
1618 nop_offset = irel->r_offset + 3;
1619 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1620 irel->r_offset -= 1;
1621 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1622 }
1623 else
1624 {
1625 struct elf_x86_link_hash_entry *eh
1626 = (struct elf_x86_link_hash_entry *) h;
1627
1628 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1629 is a nop prefix. */
1630 modrm = 0xe8;
1631 /* To support TLS optimization, always use addr32 prefix for
1632 "call *__tls_get_addr@GOTPCREL(%rip)". */
1633 if (eh && eh->tls_get_addr)
1634 {
1635 nop = 0x67;
1636 nop_offset = irel->r_offset - 2;
1637 }
1638 else
1639 {
1640 nop = link_info->call_nop_byte;
1641 if (link_info->call_nop_as_suffix)
1642 {
1643 nop_offset = irel->r_offset + 3;
1644 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1645 irel->r_offset -= 1;
1646 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1647 }
1648 else
1649 nop_offset = irel->r_offset - 2;
1650 }
1651 }
1652 bfd_put_8 (abfd, nop, contents + nop_offset);
1653 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1654 r_type = R_X86_64_PC32;
1655 }
1656 else
1657 {
1658 unsigned int rex;
1659 unsigned int rex_mask = REX_R;
1660
1661 if (r_type == R_X86_64_REX_GOTPCRELX)
1662 rex = bfd_get_8 (abfd, contents + roff - 3);
1663 else
1664 rex = 0;
1665
1666 if (opcode == 0x8b)
1667 {
1668 if (to_reloc_pc32)
1669 {
1670 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1671 "lea foo(%rip), %reg". */
1672 opcode = 0x8d;
1673 r_type = R_X86_64_PC32;
1674 }
1675 else
1676 {
1677 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1678 "mov $foo, %reg". */
1679 opcode = 0xc7;
1680 modrm = bfd_get_8 (abfd, contents + roff - 1);
1681 modrm = 0xc0 | (modrm & 0x38) >> 3;
1682 if ((rex & REX_W) != 0
1683 && ABI_64_P (link_info->output_bfd))
1684 {
1685 /* Keep the REX_W bit in REX byte for LP64. */
1686 r_type = R_X86_64_32S;
1687 goto rewrite_modrm_rex;
1688 }
1689 else
1690 {
1691 /* If the REX_W bit in the REX byte isn't needed,
1692 use R_X86_64_32 and clear the W bit to avoid
1693 sign-extending imm32 to imm64. */
1694 r_type = R_X86_64_32;
1695 /* Clear the W bit in REX byte. */
1696 rex_mask |= REX_W;
1697 goto rewrite_modrm_rex;
1698 }
1699 }
1700 }
1701 else
1702 {
1703 /* R_X86_64_PC32 isn't supported. */
1704 if (to_reloc_pc32)
1705 return TRUE;
1706
1707 modrm = bfd_get_8 (abfd, contents + roff - 1);
1708 if (opcode == 0x85)
1709 {
1710 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1711 "test $foo, %reg". */
1712 modrm = 0xc0 | (modrm & 0x38) >> 3;
1713 opcode = 0xf7;
1714 }
1715 else
1716 {
1717 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1718 "binop $foo, %reg". */
1719 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1720 opcode = 0x81;
1721 }
1722
1723 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1724 overflow when sign-extending imm32 to imm64. */
1725 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
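/* With REX.W the immediate is sign-extended to 64 bits at run time,
   so the value must pass the signed R_X86_64_32S check; without REX.W
   the operation is 32-bit and zero-extends, so plain R_X86_64_32 is
   sufficient.  */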
1726
1727 rewrite_modrm_rex:
1728 bfd_put_8 (abfd, modrm, contents + roff - 1);
1729
1730 if (rex)
1731 {
1732 /* Move the R bit to the B bit in REX byte. */
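/* REX.R (bit 2) extends the ModRM reg field and REX.B (bit 0) the
   r/m field; since the register operand has moved from reg to r/m,
   its extension bit must follow.  E.g. REX 0x4c (W+R) becomes 0x49
   (W+B) when REX.W is kept for R_X86_64_32S.  */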
1733 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1734 bfd_put_8 (abfd, rex, contents + roff - 3);
1735 }
1736
1737 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1738 irel->r_addend = 0;
1739 }
1740
1741 bfd_put_8 (abfd, opcode, contents + roff - 2);
1742 }
1743
1744 *r_type_p = r_type;
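/* Tag the relocation so that relocate_section can tell that the
   instruction has already been converted and strip the marker.  */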
1745 irel->r_info = htab->r_info (r_symndx,
1746 r_type | R_X86_64_converted_reloc_bit);
1747
1748 *converted = TRUE;
1749
1750 return TRUE;
1751 }
1752
1753 /* Look through the relocs for a section during the first phase, and
1754 calculate needed space in the global offset table, procedure
1755 linkage table, and dynamic reloc sections. */
1756
1757 static bfd_boolean
1758 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1759 asection *sec,
1760 const Elf_Internal_Rela *relocs)
1761 {
1762 struct elf_x86_link_hash_table *htab;
1763 Elf_Internal_Shdr *symtab_hdr;
1764 struct elf_link_hash_entry **sym_hashes;
1765 const Elf_Internal_Rela *rel;
1766 const Elf_Internal_Rela *rel_end;
1767 asection *sreloc;
1768 bfd_byte *contents;
1769 bfd_boolean converted;
1770
1771 if (bfd_link_relocatable (info))
1772 return TRUE;
1773
1774 /* Don't do anything special with non-loaded, non-alloced sections.
1775 In particular, any relocs in such sections should not affect GOT
1776 and PLT reference counting (ie. we don't allow them to create GOT
1777 or PLT entries), there's no possibility or desire to optimize TLS
1778 relocs, and there's not much point in propagating relocs to shared
1779 libs that the dynamic linker won't relocate. */
1780 if ((sec->flags & SEC_ALLOC) == 0)
1781 return TRUE;
1782
1783 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1784 if (htab == NULL)
1785 {
1786 sec->check_relocs_failed = 1;
1787 return FALSE;
1788 }
1789
1790 BFD_ASSERT (is_x86_elf (abfd, htab));
1791
1792 /* Get the section contents. */
1793 if (elf_section_data (sec)->this_hdr.contents != NULL)
1794 contents = elf_section_data (sec)->this_hdr.contents;
1795 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1796 {
1797 sec->check_relocs_failed = 1;
1798 return FALSE;
1799 }
1800
1801 symtab_hdr = &elf_symtab_hdr (abfd);
1802 sym_hashes = elf_sym_hashes (abfd);
1803
1804 converted = FALSE;
1805
1806 sreloc = NULL;
1807
1808 rel_end = relocs + sec->reloc_count;
1809 for (rel = relocs; rel < rel_end; rel++)
1810 {
1811 unsigned int r_type;
1812 unsigned int r_symndx;
1813 struct elf_link_hash_entry *h;
1814 struct elf_x86_link_hash_entry *eh;
1815 Elf_Internal_Sym *isym;
1816 const char *name;
1817 bfd_boolean size_reloc;
1818 bfd_boolean converted_reloc;
1819
1820 r_symndx = htab->r_sym (rel->r_info);
1821 r_type = ELF32_R_TYPE (rel->r_info);
1822
1823 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1824 {
1825 /* xgettext:c-format */
1826 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1827 abfd, r_symndx);
1828 goto error_return;
1829 }
1830
1831 if (r_symndx < symtab_hdr->sh_info)
1832 {
1833 /* A local symbol. */
1834 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1835 abfd, r_symndx);
1836 if (isym == NULL)
1837 goto error_return;
1838
1839 /* Check relocation against local STT_GNU_IFUNC symbol. */
1840 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1841 {
1842 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1843 TRUE);
1844 if (h == NULL)
1845 goto error_return;
1846
1847 /* Fake a STT_GNU_IFUNC symbol. */
1848 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1849 isym, NULL);
1850 h->type = STT_GNU_IFUNC;
1851 h->def_regular = 1;
1852 h->ref_regular = 1;
1853 h->forced_local = 1;
1854 h->root.type = bfd_link_hash_defined;
1855 }
1856 else
1857 h = NULL;
1858 }
1859 else
1860 {
1861 isym = NULL;
1862 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1863 while (h->root.type == bfd_link_hash_indirect
1864 || h->root.type == bfd_link_hash_warning)
1865 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1866 }
1867
1868 /* Check invalid x32 relocations. */
1869 if (!ABI_64_P (abfd))
1870 switch (r_type)
1871 {
1872 default:
1873 break;
1874
1875 case R_X86_64_DTPOFF64:
1876 case R_X86_64_TPOFF64:
1877 case R_X86_64_PC64:
1878 case R_X86_64_GOTOFF64:
1879 case R_X86_64_GOT64:
1880 case R_X86_64_GOTPCREL64:
1881 case R_X86_64_GOTPC64:
1882 case R_X86_64_GOTPLT64:
1883 case R_X86_64_PLTOFF64:
1884 {
1885 if (h)
1886 name = h->root.root.string;
1887 else
1888 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1889 NULL);
1890 _bfd_error_handler
1891 /* xgettext:c-format */
1892 (_("%pB: relocation %s against symbol `%s' isn't "
1893 "supported in x32 mode"), abfd,
1894 x86_64_elf_howto_table[r_type].name, name);
1895 bfd_set_error (bfd_error_bad_value);
1896 goto error_return;
1897 }
1898 break;
1899 }
1900
1901 if (h != NULL)
1902 {
1903 /* It is referenced by a non-shared object. */
1904 h->ref_regular = 1;
1905
1906 if (h->type == STT_GNU_IFUNC)
1907 elf_tdata (info->output_bfd)->has_gnu_symbols
1908 |= elf_gnu_symbol_ifunc;
1909 }
1910
1911 converted_reloc = FALSE;
1912 if ((r_type == R_X86_64_GOTPCREL
1913 || r_type == R_X86_64_GOTPCRELX
1914 || r_type == R_X86_64_REX_GOTPCRELX)
1915 && (h == NULL || h->type != STT_GNU_IFUNC))
1916 {
1917 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1918 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1919 irel, h, &converted_reloc,
1920 info))
1921 goto error_return;
1922
1923 if (converted_reloc)
1924 converted = TRUE;
1925 }
1926
1927 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1928 symtab_hdr, sym_hashes,
1929 &r_type, GOT_UNKNOWN,
1930 rel, rel_end, h, r_symndx, FALSE))
1931 goto error_return;
1932
1933 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1934 if (h == htab->elf.hgot)
1935 htab->got_referenced = TRUE;
1936
1937 eh = (struct elf_x86_link_hash_entry *) h;
1938 switch (r_type)
1939 {
1940 case R_X86_64_TLSLD:
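/* All local-dynamic accesses share a single GOT entry pair (module
   ID plus a zero DTP offset), tracked in the hash table rather than
   per symbol.  */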
1941 htab->tls_ld_or_ldm_got.refcount = 1;
1942 goto create_got;
1943
1944 case R_X86_64_TPOFF32:
1945 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1946 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1947 &x86_64_elf_howto_table[r_type]);
1948 if (eh != NULL)
1949 eh->zero_undefweak &= 0x2;
1950 break;
1951
1952 case R_X86_64_GOTTPOFF:
1953 if (!bfd_link_executable (info))
1954 info->flags |= DF_STATIC_TLS;
1955 /* Fall through */
1956
1957 case R_X86_64_GOT32:
1958 case R_X86_64_GOTPCREL:
1959 case R_X86_64_GOTPCRELX:
1960 case R_X86_64_REX_GOTPCRELX:
1961 case R_X86_64_TLSGD:
1962 case R_X86_64_GOT64:
1963 case R_X86_64_GOTPCREL64:
1964 case R_X86_64_GOTPLT64:
1965 case R_X86_64_GOTPC32_TLSDESC:
1966 case R_X86_64_TLSDESC_CALL:
1967 /* This symbol requires a global offset table entry. */
1968 {
1969 int tls_type, old_tls_type;
1970
1971 switch (r_type)
1972 {
1973 default: tls_type = GOT_NORMAL; break;
1974 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1975 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1976 case R_X86_64_GOTPC32_TLSDESC:
1977 case R_X86_64_TLSDESC_CALL:
1978 tls_type = GOT_TLS_GDESC; break;
1979 }
1980
1981 if (h != NULL)
1982 {
1983 h->got.refcount = 1;
1984 old_tls_type = eh->tls_type;
1985 }
1986 else
1987 {
1988 bfd_signed_vma *local_got_refcounts;
1989
1990 /* This is a global offset table entry for a local symbol. */
1991 local_got_refcounts = elf_local_got_refcounts (abfd);
1992 if (local_got_refcounts == NULL)
1993 {
1994 bfd_size_type size;
1995
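/* A single allocation holds three parallel per-local-symbol arrays:
   GOT reference counts (bfd_signed_vma), TLSDESC GOT offsets
   (bfd_vma) and GOT TLS types (char); the assignments below carve
   it up.  */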
1996 size = symtab_hdr->sh_info;
1997 size *= sizeof (bfd_signed_vma)
1998 + sizeof (bfd_vma) + sizeof (char);
1999 local_got_refcounts = ((bfd_signed_vma *)
2000 bfd_zalloc (abfd, size));
2001 if (local_got_refcounts == NULL)
2002 goto error_return;
2003 elf_local_got_refcounts (abfd) = local_got_refcounts;
2004 elf_x86_local_tlsdesc_gotent (abfd)
2005 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2006 elf_x86_local_got_tls_type (abfd)
2007 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2008 }
2009 local_got_refcounts[r_symndx] = 1;
2010 old_tls_type
2011 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2012 }
2013
2014 /* If a TLS symbol is accessed using IE at least once,
2015 there is no point in using a dynamic model for it. */
2016 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2017 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2018 || tls_type != GOT_TLS_IE))
2019 {
2020 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2021 tls_type = old_tls_type;
2022 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2023 && GOT_TLS_GD_ANY_P (tls_type))
2024 tls_type |= old_tls_type;
2025 else
2026 {
2027 if (h)
2028 name = h->root.root.string;
2029 else
2030 name = bfd_elf_sym_name (abfd, symtab_hdr,
2031 isym, NULL);
2032 _bfd_error_handler
2033 /* xgettext:c-format */
2034 (_("%pB: '%s' accessed both as normal and"
2035 " thread local symbol"),
2036 abfd, name);
2037 bfd_set_error (bfd_error_bad_value);
2038 goto error_return;
2039 }
2040 }
2041
2042 if (old_tls_type != tls_type)
2043 {
2044 if (eh != NULL)
2045 eh->tls_type = tls_type;
2046 else
2047 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2048 }
2049 }
2050 /* Fall through */
2051
2052 case R_X86_64_GOTOFF64:
2053 case R_X86_64_GOTPC32:
2054 case R_X86_64_GOTPC64:
2055 create_got:
2056 if (eh != NULL)
2057 eh->zero_undefweak &= 0x2;
2058 break;
2059
2060 case R_X86_64_PLT32:
2061 case R_X86_64_PLT32_BND:
2062 /* This symbol requires a procedure linkage table entry. We
2063 actually build the entry in adjust_dynamic_symbol,
2064 because this might be a case of linking PIC code which is
2065 never referenced by a dynamic object, in which case we
2066 don't need to generate a procedure linkage table entry
2067 after all. */
2068
2069 /* If this is a local symbol, we resolve it directly without
2070 creating a procedure linkage table entry. */
2071 if (h == NULL)
2072 continue;
2073
2074 eh->zero_undefweak &= 0x2;
2075 h->needs_plt = 1;
2076 h->plt.refcount = 1;
2077 break;
2078
2079 case R_X86_64_PLTOFF64:
2080 /* This tries to form the 'address' of a function relative
2081 to GOT. For global symbols we need a PLT entry. */
2082 if (h != NULL)
2083 {
2084 h->needs_plt = 1;
2085 h->plt.refcount = 1;
2086 }
2087 goto create_got;
2088
2089 case R_X86_64_SIZE32:
2090 case R_X86_64_SIZE64:
2091 size_reloc = TRUE;
2092 goto do_size;
2093
2094 case R_X86_64_32:
2095 if (!ABI_64_P (abfd))
2096 goto pointer;
2097 /* Fall through. */
2098 case R_X86_64_8:
2099 case R_X86_64_16:
2100 case R_X86_64_32S:
2101 /* Check for relocation overflow, as these relocs may lead to
2102 run-time relocation overflow. Don't error out for sections
2103 we don't care about, such as debug sections, or when the
2104 relocation overflow check is disabled. */
2105 if (!info->no_reloc_overflow_check
2106 && !converted_reloc
2107 && (bfd_link_pic (info)
2108 || (bfd_link_executable (info)
2109 && h != NULL
2110 && !h->def_regular
2111 && h->def_dynamic
2112 && (sec->flags & SEC_READONLY) == 0)))
2113 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2114 &x86_64_elf_howto_table[r_type]);
2115 /* Fall through. */
2116
2117 case R_X86_64_PC8:
2118 case R_X86_64_PC16:
2119 case R_X86_64_PC32:
2120 case R_X86_64_PC32_BND:
2121 case R_X86_64_PC64:
2122 case R_X86_64_64:
2123 pointer:
2124 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2125 eh->zero_undefweak |= 0x2;
2126 /* We are called after all symbols have been resolved. Only
2127 relocations against STT_GNU_IFUNC symbols must go through
2128 the PLT. */
2129 if (h != NULL
2130 && (bfd_link_executable (info)
2131 || h->type == STT_GNU_IFUNC))
2132 {
2133 bfd_boolean func_pointer_ref = FALSE;
2134
2135 if (r_type == R_X86_64_PC32)
2136 {
2137 /* Since something like ".long foo - ." may be used
2138 as a pointer, make sure that the PLT is used if foo is
2139 a function defined in a shared library. */
2140 if ((sec->flags & SEC_CODE) == 0)
2141 {
2142 h->pointer_equality_needed = 1;
2143 if (bfd_link_pie (info)
2144 && h->type == STT_FUNC
2145 && !h->def_regular
2146 && h->def_dynamic)
2147 {
2148 h->needs_plt = 1;
2149 h->plt.refcount = 1;
2150 }
2151 }
2152 }
2153 else if (r_type != R_X86_64_PC32_BND
2154 && r_type != R_X86_64_PC64)
2155 {
2156 h->pointer_equality_needed = 1;
2157 /* At run-time, R_X86_64_64 can be resolved for both
2158 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2159 can only be resolved for x32. */
2160 if ((sec->flags & SEC_READONLY) == 0
2161 && (r_type == R_X86_64_64
2162 || (!ABI_64_P (abfd)
2163 && (r_type == R_X86_64_32
2164 || r_type == R_X86_64_32S))))
2165 func_pointer_ref = TRUE;
2166 }
2167
2168 if (!func_pointer_ref)
2169 {
2170 /* If this reloc is in a read-only section, we might
2171 need a copy reloc. We can't check reliably at this
2172 stage whether the section is read-only, as input
2173 sections have not yet been mapped to output sections.
2174 Tentatively set the flag for now, and correct in
2175 adjust_dynamic_symbol. */
2176 h->non_got_ref = 1;
2177
2178 /* We may need a .plt entry if the symbol is a function
2179 defined in a shared lib or is a function referenced
2180 from the code or read-only section. */
2181 if (!h->def_regular
2182 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2183 h->plt.refcount = 1;
2184 }
2185 }
2186
2187 size_reloc = FALSE;
2188 do_size:
2189 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2190 htab->pointer_r_type))
2191 {
2192 struct elf_dyn_relocs *p;
2193 struct elf_dyn_relocs **head;
2194
2195 /* We must copy these reloc types into the output file.
2196 Create a reloc section in dynobj and make room for
2197 this reloc. */
2198 if (sreloc == NULL)
2199 {
2200 sreloc = _bfd_elf_make_dynamic_reloc_section
2201 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2202 abfd, /*rela?*/ TRUE);
2203
2204 if (sreloc == NULL)
2205 goto error_return;
2206 }
2207
2208 /* If this is a global symbol, we count the number of
2209 relocations we need for this symbol. */
2210 if (h != NULL)
2211 head = &eh->dyn_relocs;
2212 else
2213 {
2214 /* Track dynamic relocs needed for local syms too.
2215 We really need local syms available to do this
2216 easily. Oh well. */
2217 asection *s;
2218 void **vpp;
2219
2220 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2221 abfd, r_symndx);
2222 if (isym == NULL)
2223 goto error_return;
2224
2225 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2226 if (s == NULL)
2227 s = sec;
2228
2229 /* Beware of type punned pointers vs strict aliasing
2230 rules. */
2231 vpp = &(elf_section_data (s)->local_dynrel);
2232 head = (struct elf_dyn_relocs **)vpp;
2233 }
2234
2235 p = *head;
2236 if (p == NULL || p->sec != sec)
2237 {
2238 bfd_size_type amt = sizeof *p;
2239
2240 p = ((struct elf_dyn_relocs *)
2241 bfd_alloc (htab->elf.dynobj, amt));
2242 if (p == NULL)
2243 goto error_return;
2244 p->next = *head;
2245 *head = p;
2246 p->sec = sec;
2247 p->count = 0;
2248 p->pc_count = 0;
2249 }
2250
2251 p->count += 1;
2252 /* Count size relocation as PC-relative relocation. */
2253 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2254 p->pc_count += 1;
2255 }
2256 break;
2257
2258 /* This relocation describes the C++ object vtable hierarchy.
2259 Reconstruct it for later use during GC. */
2260 case R_X86_64_GNU_VTINHERIT:
2261 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2262 goto error_return;
2263 break;
2264
2265 /* This relocation describes which C++ vtable entries are actually
2266 used. Record for later use during GC. */
2267 case R_X86_64_GNU_VTENTRY:
2268 BFD_ASSERT (h != NULL);
2269 if (h != NULL
2270 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2271 goto error_return;
2272 break;
2273
2274 default:
2275 break;
2276 }
2277 }
2278
2279 if (elf_section_data (sec)->this_hdr.contents != contents)
2280 {
2281 if (!converted && !info->keep_memory)
2282 free (contents);
2283 else
2284 {
2285 /* Cache the section contents for elf_link_input_bfd if any
2286 load is converted or --no-keep-memory isn't used. */
2287 elf_section_data (sec)->this_hdr.contents = contents;
2288 }
2289 }
2290
2291 /* Cache relocations if any load is converted. */
2292 if (elf_section_data (sec)->relocs != relocs && converted)
2293 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2294
2295 return TRUE;
2296
2297 error_return:
2298 if (elf_section_data (sec)->this_hdr.contents != contents)
2299 free (contents);
2300 sec->check_relocs_failed = 1;
2301 return FALSE;
2302 }
2303
2304 /* Return the relocation value for @tpoff relocation
2305 if STT_TLS virtual address is ADDRESS. */
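/* x86-64 uses TLS variant II: the thread pointer points at the end of
   the static TLS block, so @tpoff values are zero or negative offsets
   back from it, which is why ADDRESS has the aligned static TLS size
   and the TLS segment base subtracted from it below.  */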
2306
2307 static bfd_vma
2308 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2309 {
2310 struct elf_link_hash_table *htab = elf_hash_table (info);
2311 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2312 bfd_vma static_tls_size;
2313
2314 /* If tls_segment is NULL, we should have signalled an error already. */
2315 if (htab->tls_sec == NULL)
2316 return 0;
2317
2318 /* Consider special static TLS alignment requirements. */
2319 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2320 return address - static_tls_size - htab->tls_sec->vma;
2321 }
2322
2323 /* Relocate an x86_64 ELF section. */
2324
2325 static bfd_boolean
2326 elf_x86_64_relocate_section (bfd *output_bfd,
2327 struct bfd_link_info *info,
2328 bfd *input_bfd,
2329 asection *input_section,
2330 bfd_byte *contents,
2331 Elf_Internal_Rela *relocs,
2332 Elf_Internal_Sym *local_syms,
2333 asection **local_sections)
2334 {
2335 struct elf_x86_link_hash_table *htab;
2336 Elf_Internal_Shdr *symtab_hdr;
2337 struct elf_link_hash_entry **sym_hashes;
2338 bfd_vma *local_got_offsets;
2339 bfd_vma *local_tlsdesc_gotents;
2340 Elf_Internal_Rela *rel;
2341 Elf_Internal_Rela *wrel;
2342 Elf_Internal_Rela *relend;
2343 unsigned int plt_entry_size;
2344
2345 /* Skip if check_relocs failed. */
2346 if (input_section->check_relocs_failed)
2347 return FALSE;
2348
2349 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2350 if (htab == NULL)
2351 return FALSE;
2352
2353 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2354
2355 plt_entry_size = htab->plt.plt_entry_size;
2356 symtab_hdr = &elf_symtab_hdr (input_bfd);
2357 sym_hashes = elf_sym_hashes (input_bfd);
2358 local_got_offsets = elf_local_got_offsets (input_bfd);
2359 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2360
2361 _bfd_x86_elf_set_tls_module_base (info);
2362
2363 rel = wrel = relocs;
2364 relend = relocs + input_section->reloc_count;
2365 for (; rel < relend; wrel++, rel++)
2366 {
2367 unsigned int r_type, r_type_tls;
2368 reloc_howto_type *howto;
2369 unsigned long r_symndx;
2370 struct elf_link_hash_entry *h;
2371 struct elf_x86_link_hash_entry *eh;
2372 Elf_Internal_Sym *sym;
2373 asection *sec;
2374 bfd_vma off, offplt, plt_offset;
2375 bfd_vma relocation;
2376 bfd_boolean unresolved_reloc;
2377 bfd_reloc_status_type r;
2378 int tls_type;
2379 asection *base_got, *resolved_plt;
2380 bfd_vma st_size;
2381 bfd_boolean resolved_to_zero;
2382 bfd_boolean relative_reloc;
2383 bfd_boolean converted_reloc;
2384 bfd_boolean need_copy_reloc_in_pie;
2385
2386 r_type = ELF32_R_TYPE (rel->r_info);
2387 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2388 || r_type == (int) R_X86_64_GNU_VTENTRY)
2389 {
2390 if (wrel != rel)
2391 *wrel = *rel;
2392 continue;
2393 }
2394
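/* R_X86_64_converted_reloc_bit marks relocations whose instruction was
   already rewritten by elf_x86_64_convert_load_reloc during
   check_relocs; strip the marker and relocate with the converted
   type.  */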
2395 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2396 r_type &= ~R_X86_64_converted_reloc_bit;
2397
2398 if (r_type >= (int) R_X86_64_standard)
2399 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2400
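/* For R_X86_64_32 on x32 the last entry of the howto table is used
   instead of the one indexed by the reloc number; it is a second
   R_X86_64_32 variant intended for x32, where the 32-bit field holds
   the whole zero-extended address and so needs a different overflow
   check.  */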
2401 if (r_type != (int) R_X86_64_32
2402 || ABI_64_P (output_bfd))
2403 howto = x86_64_elf_howto_table + r_type;
2404 else
2405 howto = (x86_64_elf_howto_table
2406 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2407 r_symndx = htab->r_sym (rel->r_info);
2408 h = NULL;
2409 sym = NULL;
2410 sec = NULL;
2411 unresolved_reloc = FALSE;
2412 if (r_symndx < symtab_hdr->sh_info)
2413 {
2414 sym = local_syms + r_symndx;
2415 sec = local_sections[r_symndx];
2416
2417 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2418 &sec, rel);
2419 st_size = sym->st_size;
2420
2421 /* Relocate against local STT_GNU_IFUNC symbol. */
2422 if (!bfd_link_relocatable (info)
2423 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2424 {
2425 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2426 rel, FALSE);
2427 if (h == NULL)
2428 abort ();
2429
2430 /* Set STT_GNU_IFUNC symbol value. */
2431 h->root.u.def.value = sym->st_value;
2432 h->root.u.def.section = sec;
2433 }
2434 }
2435 else
2436 {
2437 bfd_boolean warned ATTRIBUTE_UNUSED;
2438 bfd_boolean ignored ATTRIBUTE_UNUSED;
2439
2440 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2441 r_symndx, symtab_hdr, sym_hashes,
2442 h, sec, relocation,
2443 unresolved_reloc, warned, ignored);
2444 st_size = h->size;
2445 }
2446
2447 if (sec != NULL && discarded_section (sec))
2448 {
2449 _bfd_clear_contents (howto, input_bfd, input_section,
2450 contents + rel->r_offset);
2451 wrel->r_offset = rel->r_offset;
2452 wrel->r_info = 0;
2453 wrel->r_addend = 0;
2454
2455 /* For ld -r, remove relocations in debug sections against
2456 sections defined in discarded sections. Not done for
2457 eh_frame, whose editing code expects them to be present. */
2458 if (bfd_link_relocatable (info)
2459 && (input_section->flags & SEC_DEBUGGING))
2460 wrel--;
2461
2462 continue;
2463 }
2464
2465 if (bfd_link_relocatable (info))
2466 {
2467 if (wrel != rel)
2468 *wrel = *rel;
2469 continue;
2470 }
2471
2472 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2473 {
2474 if (r_type == R_X86_64_64)
2475 {
2476 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2477 zero-extend it to 64bit if addend is zero. */
2478 r_type = R_X86_64_32;
2479 memset (contents + rel->r_offset + 4, 0, 4);
2480 }
2481 else if (r_type == R_X86_64_SIZE64)
2482 {
2483 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2484 zero-extend it to 64bit if addend is zero. */
2485 r_type = R_X86_64_SIZE32;
2486 memset (contents + rel->r_offset + 4, 0, 4);
2487 }
2488 }
2489
2490 eh = (struct elf_x86_link_hash_entry *) h;
2491
2492 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2493 it here if it is defined in a non-shared object. */
2494 if (h != NULL
2495 && h->type == STT_GNU_IFUNC
2496 && h->def_regular)
2497 {
2498 bfd_vma plt_index;
2499 const char *name;
2500
2501 if ((input_section->flags & SEC_ALLOC) == 0)
2502 {
2503 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2504 STT_GNU_IFUNC symbol as STT_FUNC. */
2505 if (elf_section_type (input_section) == SHT_NOTE)
2506 goto skip_ifunc;
2507 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2508 sections because such sections are not SEC_ALLOC and
2509 thus ld.so will not process them. */
2510 if ((input_section->flags & SEC_DEBUGGING) != 0)
2511 continue;
2512 abort ();
2513 }
2514
2515 switch (r_type)
2516 {
2517 default:
2518 break;
2519
2520 case R_X86_64_GOTPCREL:
2521 case R_X86_64_GOTPCRELX:
2522 case R_X86_64_REX_GOTPCRELX:
2523 case R_X86_64_GOTPCREL64:
2524 base_got = htab->elf.sgot;
2525 off = h->got.offset;
2526
2527 if (base_got == NULL)
2528 abort ();
2529
2530 if (off == (bfd_vma) -1)
2531 {
2532 /* We can't use h->got.offset here to save state, or
2533 even just remember the offset, as finish_dynamic_symbol
2534 would use that as offset into .got. */
2535
2536 if (h->plt.offset == (bfd_vma) -1)
2537 abort ();
2538
2539 if (htab->elf.splt != NULL)
2540 {
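/* The first three .got.plt slots are reserved (the address of
   .dynamic, the link map and the lazy resolver entry point filled in
   by ld.so), so PLT entry N owns .got.plt slot N + 3; plt.has_plt0
   skips the initial PLT0 stub when counting entries.  */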
2541 plt_index = (h->plt.offset / plt_entry_size
2542 - htab->plt.has_plt0);
2543 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2544 base_got = htab->elf.sgotplt;
2545 }
2546 else
2547 {
2548 plt_index = h->plt.offset / plt_entry_size;
2549 off = plt_index * GOT_ENTRY_SIZE;
2550 base_got = htab->elf.igotplt;
2551 }
2552
2553 if (h->dynindx == -1
2554 || h->forced_local
2555 || info->symbolic)
2556 {
2557 /* This references the local definition. We must
2558 initialize this entry in the global offset table.
2559 Since the offset must always be a multiple of 8,
2560 we use the least significant bit to record
2561 whether we have initialized it already.
2562
2563 When doing a dynamic link, we create a .rela.got
2564 relocation entry to initialize the value. This
2565 is done in the finish_dynamic_symbol routine. */
2566 if ((off & 1) != 0)
2567 off &= ~1;
2568 else
2569 {
2570 bfd_put_64 (output_bfd, relocation,
2571 base_got->contents + off);
2572 /* Note that this is harmless for the GOTPLT64
2573 case, as -1 | 1 still is -1. */
2574 h->got.offset |= 1;
2575 }
2576 }
2577 }
2578
2579 relocation = (base_got->output_section->vma
2580 + base_got->output_offset + off);
2581
2582 goto do_relocation;
2583 }
2584
2585 if (h->plt.offset == (bfd_vma) -1)
2586 {
2587 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2588 if (r_type == htab->pointer_r_type
2589 && (input_section->flags & SEC_CODE) == 0)
2590 goto do_ifunc_pointer;
2591 goto bad_ifunc_reloc;
2592 }
2593
2594 /* STT_GNU_IFUNC symbol must go through PLT. */
2595 if (htab->elf.splt != NULL)
2596 {
2597 if (htab->plt_second != NULL)
2598 {
2599 resolved_plt = htab->plt_second;
2600 plt_offset = eh->plt_second.offset;
2601 }
2602 else
2603 {
2604 resolved_plt = htab->elf.splt;
2605 plt_offset = h->plt.offset;
2606 }
2607 }
2608 else
2609 {
2610 resolved_plt = htab->elf.iplt;
2611 plt_offset = h->plt.offset;
2612 }
2613
2614 relocation = (resolved_plt->output_section->vma
2615 + resolved_plt->output_offset + plt_offset);
2616
2617 switch (r_type)
2618 {
2619 default:
2620 bad_ifunc_reloc:
2621 if (h->root.root.string)
2622 name = h->root.root.string;
2623 else
2624 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2625 NULL);
2626 _bfd_error_handler
2627 /* xgettext:c-format */
2628 (_("%pB: relocation %s against STT_GNU_IFUNC "
2629 "symbol `%s' isn't supported"), input_bfd,
2630 howto->name, name);
2631 bfd_set_error (bfd_error_bad_value);
2632 return FALSE;
2633
2634 case R_X86_64_32S:
2635 if (bfd_link_pic (info))
2636 abort ();
2637 goto do_relocation;
2638
2639 case R_X86_64_32:
2640 if (ABI_64_P (output_bfd))
2641 goto do_relocation;
2642 /* FALLTHROUGH */
2643 case R_X86_64_64:
2644 do_ifunc_pointer:
2645 if (rel->r_addend != 0)
2646 {
2647 if (h->root.root.string)
2648 name = h->root.root.string;
2649 else
2650 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2651 sym, NULL);
2652 _bfd_error_handler
2653 /* xgettext:c-format */
2654 (_("%pB: relocation %s against STT_GNU_IFUNC "
2655 "symbol `%s' has non-zero addend: %" PRId64),
2656 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2657 bfd_set_error (bfd_error_bad_value);
2658 return FALSE;
2659 }
2660
2661 /* Generate a dynamic relocation only when there is a
2662 non-GOT reference in a shared object or there is no
2663 PLT. */
2664 if ((bfd_link_pic (info) && h->non_got_ref)
2665 || h->plt.offset == (bfd_vma) -1)
2666 {
2667 Elf_Internal_Rela outrel;
2668 asection *sreloc;
2669
2670 /* Need a dynamic relocation to get the real function
2671 address. */
2672 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2673 info,
2674 input_section,
2675 rel->r_offset);
2676 if (outrel.r_offset == (bfd_vma) -1
2677 || outrel.r_offset == (bfd_vma) -2)
2678 abort ();
2679
2680 outrel.r_offset += (input_section->output_section->vma
2681 + input_section->output_offset);
2682
2683 if (POINTER_LOCAL_IFUNC_P (info, h))
2684 {
2685 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2686 h->root.root.string,
2687 h->root.u.def.section->owner);
2688
2689 /* This symbol is resolved locally. */
2690 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2691 outrel.r_addend = (h->root.u.def.value
2692 + h->root.u.def.section->output_section->vma
2693 + h->root.u.def.section->output_offset);
2694 }
2695 else
2696 {
2697 outrel.r_info = htab->r_info (h->dynindx, r_type);
2698 outrel.r_addend = 0;
2699 }
2700
2701 /* Dynamic relocations are stored in
2702 1. .rela.ifunc section in PIC object.
2703 2. .rela.got section in dynamic executable.
2704 3. .rela.iplt section in static executable. */
2705 if (bfd_link_pic (info))
2706 sreloc = htab->elf.irelifunc;
2707 else if (htab->elf.splt != NULL)
2708 sreloc = htab->elf.srelgot;
2709 else
2710 sreloc = htab->elf.irelplt;
2711 elf_append_rela (output_bfd, sreloc, &outrel);
2712
2713 /* If this reloc is against an external symbol, we
2714 do not want to fiddle with the addend. Otherwise,
2715 we need to include the symbol value so that it
2716 becomes an addend for the dynamic reloc. For an
2717 internal symbol, we have updated addend. */
2718 continue;
2719 }
2720 /* FALLTHROUGH */
2721 case R_X86_64_PC32:
2722 case R_X86_64_PC32_BND:
2723 case R_X86_64_PC64:
2724 case R_X86_64_PLT32:
2725 case R_X86_64_PLT32_BND:
2726 goto do_relocation;
2727 }
2728 }
2729
2730 skip_ifunc:
2731 resolved_to_zero = (eh != NULL
2732 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2733
2734 /* When generating a shared object, the relocations handled here are
2735 copied into the output file to be resolved at run time. */
2736 switch (r_type)
2737 {
2738 case R_X86_64_GOT32:
2739 case R_X86_64_GOT64:
2740 /* Relocation is to the entry for this symbol in the global
2741 offset table. */
2742 case R_X86_64_GOTPCREL:
2743 case R_X86_64_GOTPCRELX:
2744 case R_X86_64_REX_GOTPCRELX:
2745 case R_X86_64_GOTPCREL64:
2746 /* Use global offset table entry as symbol value. */
2747 case R_X86_64_GOTPLT64:
2748 /* This is obsolete and treated the same as GOT64. */
2749 base_got = htab->elf.sgot;
2750
2751 if (htab->elf.sgot == NULL)
2752 abort ();
2753
2754 relative_reloc = FALSE;
2755 if (h != NULL)
2756 {
2757 off = h->got.offset;
2758 if (h->needs_plt
2759 && h->plt.offset != (bfd_vma)-1
2760 && off == (bfd_vma)-1)
2761 {
2762 /* We can't use h->got.offset here to save
2763 state, or even just remember the offset, as
2764 finish_dynamic_symbol would use that as offset into
2765 .got. */
2766 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2767 - htab->plt.has_plt0);
2768 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2769 base_got = htab->elf.sgotplt;
2770 }
2771
2772 if (RESOLVED_LOCALLY_P (info, h, htab))
2773 {
2774 /* We must initialize this entry in the global offset
2775 table. Since the offset must always be a multiple
2776 of 8, we use the least significant bit to record
2777 whether we have initialized it already.
2778
2779 When doing a dynamic link, we create a .rela.got
2780 relocation entry to initialize the value. This is
2781 done in the finish_dynamic_symbol routine. */
2782 if ((off & 1) != 0)
2783 off &= ~1;
2784 else
2785 {
2786 bfd_put_64 (output_bfd, relocation,
2787 base_got->contents + off);
2788 /* Note that this is harmless for the GOTPLT64 case,
2789 as -1 | 1 still is -1. */
2790 h->got.offset |= 1;
2791
2792 if (GENERATE_RELATIVE_RELOC_P (info, h))
2793 {
2794 /* If this symbol isn't dynamic in PIC,
2795 generate R_X86_64_RELATIVE here. */
2796 eh->no_finish_dynamic_symbol = 1;
2797 relative_reloc = TRUE;
2798 }
2799 }
2800 }
2801 else
2802 unresolved_reloc = FALSE;
2803 }
2804 else
2805 {
2806 if (local_got_offsets == NULL)
2807 abort ();
2808
2809 off = local_got_offsets[r_symndx];
2810
2811 /* The offset must always be a multiple of 8. We use
2812 the least significant bit to record whether we have
2813 already generated the necessary reloc. */
2814 if ((off & 1) != 0)
2815 off &= ~1;
2816 else
2817 {
2818 bfd_put_64 (output_bfd, relocation,
2819 base_got->contents + off);
2820 local_got_offsets[r_symndx] |= 1;
2821
2822 if (bfd_link_pic (info))
2823 relative_reloc = TRUE;
2824 }
2825 }
2826
2827 if (relative_reloc)
2828 {
2829 asection *s;
2830 Elf_Internal_Rela outrel;
2831
2832 /* We need to generate a R_X86_64_RELATIVE reloc
2833 for the dynamic linker. */
2834 s = htab->elf.srelgot;
2835 if (s == NULL)
2836 abort ();
2837
2838 outrel.r_offset = (base_got->output_section->vma
2839 + base_got->output_offset
2840 + off);
2841 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2842 outrel.r_addend = relocation;
2843 elf_append_rela (output_bfd, s, &outrel);
2844 }
2845
2846 if (off >= (bfd_vma) -2)
2847 abort ();
2848
2849 relocation = base_got->output_section->vma
2850 + base_got->output_offset + off;
2851 if (r_type != R_X86_64_GOTPCREL
2852 && r_type != R_X86_64_GOTPCRELX
2853 && r_type != R_X86_64_REX_GOTPCRELX
2854 && r_type != R_X86_64_GOTPCREL64)
2855 relocation -= htab->elf.sgotplt->output_section->vma
2856 - htab->elf.sgotplt->output_offset;
2857
2858 break;
2859
2860 case R_X86_64_GOTOFF64:
2861 /* Relocation is relative to the start of the global offset
2862 table. */
2863
2864 /* Check that it isn't a protected function or data symbol
2865 for a shared library, since it may not be local when used
2866 as a function address or with a copy relocation. We also
2867 need to make sure that the symbol is referenced locally. */
2868 if (bfd_link_pic (info) && h)
2869 {
2870 if (!h->def_regular)
2871 {
2872 const char *v;
2873
2874 switch (ELF_ST_VISIBILITY (h->other))
2875 {
2876 case STV_HIDDEN:
2877 v = _("hidden symbol");
2878 break;
2879 case STV_INTERNAL:
2880 v = _("internal symbol");
2881 break;
2882 case STV_PROTECTED:
2883 v = _("protected symbol");
2884 break;
2885 default:
2886 v = _("symbol");
2887 break;
2888 }
2889
2890 _bfd_error_handler
2891 /* xgettext:c-format */
2892 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2893 " `%s' can not be used when making a shared object"),
2894 input_bfd, v, h->root.root.string);
2895 bfd_set_error (bfd_error_bad_value);
2896 return FALSE;
2897 }
2898 else if (!bfd_link_executable (info)
2899 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2900 && (h->type == STT_FUNC
2901 || h->type == STT_OBJECT)
2902 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2903 {
2904 _bfd_error_handler
2905 /* xgettext:c-format */
2906 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2907 " `%s' can not be used when making a shared object"),
2908 input_bfd,
2909 h->type == STT_FUNC ? "function" : "data",
2910 h->root.root.string);
2911 bfd_set_error (bfd_error_bad_value);
2912 return FALSE;
2913 }
2914 }
2915
2916 /* Note that sgot is not involved in this
2917 calculation. We always want the start of .got.plt. If we
2918 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2919 permitted by the ABI, we might have to change this
2920 calculation. */
2921 relocation -= htab->elf.sgotplt->output_section->vma
2922 + htab->elf.sgotplt->output_offset;
2923 break;
2924
2925 case R_X86_64_GOTPC32:
2926 case R_X86_64_GOTPC64:
2927 /* Use global offset table as symbol value. */
2928 relocation = htab->elf.sgotplt->output_section->vma
2929 + htab->elf.sgotplt->output_offset;
2930 unresolved_reloc = FALSE;
2931 break;
2932
2933 case R_X86_64_PLTOFF64:
2934 /* Relocation is PLT entry relative to GOT. For local
2935 symbols it's the symbol itself relative to GOT. */
2936 if (h != NULL
2937 /* See PLT32 handling. */
2938 && (h->plt.offset != (bfd_vma) -1
2939 || eh->plt_got.offset != (bfd_vma) -1)
2940 && htab->elf.splt != NULL)
2941 {
2942 if (eh->plt_got.offset != (bfd_vma) -1)
2943 {
2944 /* Use the GOT PLT. */
2945 resolved_plt = htab->plt_got;
2946 plt_offset = eh->plt_got.offset;
2947 }
2948 else if (htab->plt_second != NULL)
2949 {
2950 resolved_plt = htab->plt_second;
2951 plt_offset = eh->plt_second.offset;
2952 }
2953 else
2954 {
2955 resolved_plt = htab->elf.splt;
2956 plt_offset = h->plt.offset;
2957 }
2958
2959 relocation = (resolved_plt->output_section->vma
2960 + resolved_plt->output_offset
2961 + plt_offset);
2962 unresolved_reloc = FALSE;
2963 }
2964
2965 relocation -= htab->elf.sgotplt->output_section->vma
2966 + htab->elf.sgotplt->output_offset;
2967 break;
2968
2969 case R_X86_64_PLT32:
2970 case R_X86_64_PLT32_BND:
2971 /* Relocation is to the entry for this symbol in the
2972 procedure linkage table. */
2973
2974 /* Resolve a PLT32 reloc against a local symbol directly,
2975 without using the procedure linkage table. */
2976 if (h == NULL)
2977 break;
2978
2979 if ((h->plt.offset == (bfd_vma) -1
2980 && eh->plt_got.offset == (bfd_vma) -1)
2981 || htab->elf.splt == NULL)
2982 {
2983 /* We didn't make a PLT entry for this symbol. This
2984 happens when statically linking PIC code, or when
2985 using -Bsymbolic. */
2986 break;
2987 }
2988
2989 use_plt:
2990 if (h->plt.offset != (bfd_vma) -1)
2991 {
2992 if (htab->plt_second != NULL)
2993 {
2994 resolved_plt = htab->plt_second;
2995 plt_offset = eh->plt_second.offset;
2996 }
2997 else
2998 {
2999 resolved_plt = htab->elf.splt;
3000 plt_offset = h->plt.offset;
3001 }
3002 }
3003 else
3004 {
3005 /* Use the GOT PLT. */
3006 resolved_plt = htab->plt_got;
3007 plt_offset = eh->plt_got.offset;
3008 }
3009
3010 relocation = (resolved_plt->output_section->vma
3011 + resolved_plt->output_offset
3012 + plt_offset);
3013 unresolved_reloc = FALSE;
3014 break;
3015
3016 case R_X86_64_SIZE32:
3017 case R_X86_64_SIZE64:
3018 /* Set to symbol size. */
3019 relocation = st_size;
3020 goto direct;
3021
3022 case R_X86_64_PC8:
3023 case R_X86_64_PC16:
3024 case R_X86_64_PC32:
3025 case R_X86_64_PC32_BND:
3026 /* Don't complain about -fPIC if the symbol is undefined when
3027 building an executable, unless it is an unresolved weak
3028 symbol, references a dynamic definition in a PIE, or -z
3029 nocopyreloc is used. */
3030 if ((input_section->flags & SEC_ALLOC) != 0
3031 && (input_section->flags & SEC_READONLY) != 0
3032 && h != NULL
3033 && ((bfd_link_executable (info)
3034 && ((h->root.type == bfd_link_hash_undefweak
3035 && !resolved_to_zero)
3036 || (bfd_link_pie (info)
3037 && !h->def_regular
3038 && h->def_dynamic)
3039 || ((info->nocopyreloc
3040 || (eh->def_protected
3041 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3042 && h->def_dynamic
3043 && !(h->root.u.def.section->flags & SEC_CODE))))
3044 || bfd_link_dll (info)))
3045 {
3046 bfd_boolean fail = FALSE;
3047 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3048 {
3049 /* Symbol is referenced locally. Make sure it is
3050 defined locally. */
3051 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3052 }
3053 else if (!(bfd_link_pie (info)
3054 && (h->needs_copy || eh->needs_copy)))
3055 {
3056 /* The symbol doesn't need a copy reloc and isn't referenced
3057 locally. The address of a protected function may not be
3058 reachable at run time. */
3059 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3060 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3061 && h->type == STT_FUNC));
3062 }
3063
3064 if (fail)
3065 return elf_x86_64_need_pic (info, input_bfd, input_section,
3066 h, NULL, NULL, howto);
3067 }
3068 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3069 as function address. */
3070 else if (h != NULL
3071 && (input_section->flags & SEC_CODE) == 0
3072 && bfd_link_pie (info)
3073 && h->type == STT_FUNC
3074 && !h->def_regular
3075 && h->def_dynamic)
3076 goto use_plt;
3077 /* Fall through. */
3078
3079 case R_X86_64_8:
3080 case R_X86_64_16:
3081 case R_X86_64_32:
3082 case R_X86_64_PC64:
3083 case R_X86_64_64:
3084 /* FIXME: The ABI says the linker should make sure the value is
3085 the same when it's zero-extended to 64 bits. */
3086
3087 direct:
3088 if ((input_section->flags & SEC_ALLOC) == 0)
3089 break;
3090
3091 need_copy_reloc_in_pie = (bfd_link_pie (info)
3092 && h != NULL
3093 && (h->needs_copy
3094 || eh->needs_copy
3095 || (h->root.type
3096 == bfd_link_hash_undefined))
3097 && (X86_PCREL_TYPE_P (r_type)
3098 || X86_SIZE_TYPE_P (r_type)));
3099
3100 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3101 need_copy_reloc_in_pie,
3102 resolved_to_zero, FALSE))
3103 {
3104 Elf_Internal_Rela outrel;
3105 bfd_boolean skip, relocate;
3106 asection *sreloc;
3107
3108 /* When generating a shared object, these relocations
3109 are copied into the output file to be resolved at run
3110 time. */
3111 skip = FALSE;
3112 relocate = FALSE;
3113
3114 outrel.r_offset =
3115 _bfd_elf_section_offset (output_bfd, info, input_section,
3116 rel->r_offset);
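/* _bfd_elf_section_offset returns (bfd_vma) -1 when the target was
   discarded, so the dynamic reloc is dropped entirely, and
   (bfd_vma) -2 when the dynamic reloc is unwanted (e.g. edited
   .eh_frame) but the contents must still be relocated in place.  */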
3117 if (outrel.r_offset == (bfd_vma) -1)
3118 skip = TRUE;
3119 else if (outrel.r_offset == (bfd_vma) -2)
3120 skip = TRUE, relocate = TRUE;
3121
3122 outrel.r_offset += (input_section->output_section->vma
3123 + input_section->output_offset);
3124
3125 if (skip)
3126 memset (&outrel, 0, sizeof outrel);
3127
3128 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3129 {
3130 outrel.r_info = htab->r_info (h->dynindx, r_type);
3131 outrel.r_addend = rel->r_addend;
3132 }
3133 else
3134 {
3135 /* This symbol is local, or marked to become local.
3136 When relocation overflow check is disabled, we
3137 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3138 if (r_type == htab->pointer_r_type
3139 || (r_type == R_X86_64_32
3140 && info->no_reloc_overflow_check))
3141 {
3142 relocate = TRUE;
3143 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3144 outrel.r_addend = relocation + rel->r_addend;
3145 }
3146 else if (r_type == R_X86_64_64
3147 && !ABI_64_P (output_bfd))
3148 {
3149 relocate = TRUE;
3150 outrel.r_info = htab->r_info (0,
3151 R_X86_64_RELATIVE64);
3152 outrel.r_addend = relocation + rel->r_addend;
3153 /* Check addend overflow. */
3154 if ((outrel.r_addend & 0x80000000)
3155 != (rel->r_addend & 0x80000000))
3156 {
3157 const char *name;
3158 int addend = rel->r_addend;
3159 if (h && h->root.root.string)
3160 name = h->root.root.string;
3161 else
3162 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3163 sym, NULL);
3164 _bfd_error_handler
3165 /* xgettext:c-format */
3166 (_("%pB: addend %s%#x in relocation %s against "
3167 "symbol `%s' at %#" PRIx64
3168 " in section `%pA' is out of range"),
3169 input_bfd, addend < 0 ? "-" : "", addend,
3170 howto->name, name, (uint64_t) rel->r_offset,
3171 input_section);
3172 bfd_set_error (bfd_error_bad_value);
3173 return FALSE;
3174 }
3175 }
3176 else
3177 {
3178 long sindx;
3179
3180 if (bfd_is_abs_section (sec))
3181 sindx = 0;
3182 else if (sec == NULL || sec->owner == NULL)
3183 {
3184 bfd_set_error (bfd_error_bad_value);
3185 return FALSE;
3186 }
3187 else
3188 {
3189 asection *osec;
3190
3191 /* We are turning this relocation into one
3192 against a section symbol. It would be
3193 proper to subtract the symbol's value,
3194 osec->vma, from the emitted reloc addend,
3195 but ld.so expects buggy relocs. */
3196 osec = sec->output_section;
3197 sindx = elf_section_data (osec)->dynindx;
3198 if (sindx == 0)
3199 {
3200 asection *oi = htab->elf.text_index_section;
3201 sindx = elf_section_data (oi)->dynindx;
3202 }
3203 BFD_ASSERT (sindx != 0);
3204 }
3205
3206 outrel.r_info = htab->r_info (sindx, r_type);
3207 outrel.r_addend = relocation + rel->r_addend;
3208 }
3209 }
3210
3211 sreloc = elf_section_data (input_section)->sreloc;
3212
3213 if (sreloc == NULL || sreloc->contents == NULL)
3214 {
3215 r = bfd_reloc_notsupported;
3216 goto check_relocation_error;
3217 }
3218
3219 elf_append_rela (output_bfd, sreloc, &outrel);
3220
3221 /* If this reloc is against an external symbol, we do
3222 not want to fiddle with the addend. Otherwise, we
3223 need to include the symbol value so that it becomes
3224 an addend for the dynamic reloc. */
3225 if (! relocate)
3226 continue;
3227 }
3228
3229 break;
3230
3231 case R_X86_64_TLSGD:
3232 case R_X86_64_GOTPC32_TLSDESC:
3233 case R_X86_64_TLSDESC_CALL:
3234 case R_X86_64_GOTTPOFF:
3235 tls_type = GOT_UNKNOWN;
3236 if (h == NULL && local_got_offsets)
3237 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3238 else if (h != NULL)
3239 tls_type = elf_x86_hash_entry (h)->tls_type;
3240
3241 r_type_tls = r_type;
3242 if (! elf_x86_64_tls_transition (info, input_bfd,
3243 input_section, contents,
3244 symtab_hdr, sym_hashes,
3245 &r_type_tls, tls_type, rel,
3246 relend, h, r_symndx, TRUE))
3247 return FALSE;
3248
3249 if (r_type_tls == R_X86_64_TPOFF32)
3250 {
3251 bfd_vma roff = rel->r_offset;
3252
3253 BFD_ASSERT (! unresolved_reloc);
3254
3255 if (r_type == R_X86_64_TLSGD)
3256 {
3257 /* GD->LE transition. For 64bit, change
3258 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3259 .word 0x6666; rex64; call __tls_get_addr@PLT
3260 or
3261 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3262 .byte 0x66; rex64
3263 call *__tls_get_addr@GOTPCREL(%rip)
3264 which may be converted to
3265 addr32 call __tls_get_addr
3266 into:
3267 movq %fs:0, %rax
3268 leaq foo@tpoff(%rax), %rax
3269 For 32bit, change
3270 leaq foo@tlsgd(%rip), %rdi
3271 .word 0x6666; rex64; call __tls_get_addr@PLT
3272 or
3273 leaq foo@tlsgd(%rip), %rdi
3274 .byte 0x66; rex64
3275 call *__tls_get_addr@GOTPCREL(%rip)
3276 which may be converted to
3277 addr32 call __tls_get_addr
3278 into:
3279 movl %fs:0, %eax
3280 leaq foo@tpoff(%rax), %rax
3281 For largepic, change:
3282 leaq foo@tlsgd(%rip), %rdi
3283 movabsq $__tls_get_addr@pltoff, %rax
3284 addq %r15, %rax
3285 call *%rax
3286 into:
3287 movq %fs:0, %rax
3288 leaq foo@tpoff(%rax), %rax
3289 nopw 0x0(%rax,%rax,1) */
3290 int largepic = 0;
3291 if (ABI_64_P (output_bfd))
3292 {
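/* The large-model sequence follows the leaq with "movabsq
   $__tls_get_addr@pltoff, %rax", encoded 48 b8 <imm64>, so a 0xb8
   byte at roff + 5 identifies it; the normal sequence has the 0x66
   padding prefixes there instead.  */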
3293 if (contents[roff + 5] == 0xb8)
3294 {
3295 memcpy (contents + roff - 3,
3296 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3297 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3298 largepic = 1;
3299 }
3300 else
3301 memcpy (contents + roff - 4,
3302 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3303 16);
3304 }
3305 else
3306 memcpy (contents + roff - 3,
3307 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3308 15);
3309 bfd_put_32 (output_bfd,
3310 elf_x86_64_tpoff (info, relocation),
3311 contents + roff + 8 + largepic);
3312 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3313 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3314 rel++;
3315 wrel++;
3316 continue;
3317 }
3318 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3319 {
3320 /* GDesc -> LE transition.
3321 It's originally something like:
3322 leaq x@tlsdesc(%rip), %rax
3323
3324 Change it to:
3325 movl $x@tpoff, %rax. */
3326
3327 unsigned int val, type;
3328
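/* "type" is the REX prefix of the original leaq and "val" its ModRM
   byte.  Keep REX.W, move a REX.R extension down to REX.B (the
   destination register moves from the ModRM reg field to the r/m
   field), turn the 0x8d (lea) opcode into 0xc7 (mov imm32) and set
   mod=11 in the ModRM byte.  */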
3329 type = bfd_get_8 (input_bfd, contents + roff - 3);
3330 val = bfd_get_8 (input_bfd, contents + roff - 1);
3331 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3332 contents + roff - 3);
3333 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3334 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3335 contents + roff - 1);
3336 bfd_put_32 (output_bfd,
3337 elf_x86_64_tpoff (info, relocation),
3338 contents + roff);
3339 continue;
3340 }
3341 else if (r_type == R_X86_64_TLSDESC_CALL)
3342 {
3343 /* GDesc -> LE transition.
3344 It's originally:
3345 call *(%rax)
3346 Turn it into:
3347 xchg %ax,%ax. */
3348 bfd_put_8 (output_bfd, 0x66, contents + roff);
3349 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3350 continue;
3351 }
3352 else if (r_type == R_X86_64_GOTTPOFF)
3353 {
3354 /* IE->LE transition:
3355 For 64bit, originally it can be one of:
3356 movq foo@gottpoff(%rip), %reg
3357 addq foo@gottpoff(%rip), %reg
3358 We change it into:
3359 movq $foo, %reg
3360 leaq foo(%reg), %reg
3361 addq $foo, %reg.
3362 For 32bit, originally it can be one of:
3363 movq foo@gottpoff(%rip), %reg
3364 addl foo@gottpoff(%rip), %reg
3365 We change it into:
3366 movq $foo, %reg
3367 leal foo(%reg), %reg
3368 addl $foo, %reg. */
3369
3370 unsigned int val, type, reg;
3371
3372 if (roff >= 3)
3373 val = bfd_get_8 (input_bfd, contents + roff - 3);
3374 else
3375 val = 0;
3376 type = bfd_get_8 (input_bfd, contents + roff - 2);
3377 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3378 reg >>= 3;
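/* "val" is the REX prefix (if any), "type" the opcode and "reg" the
   destination taken from the ModRM reg field.  The rewrites below
   keep REX.W and move REX.R to REX.B where the register ends up in
   the r/m field (0x4c -> 0x49), or set both for the leaq form
   (0x4c -> 0x4d); 0x44/0x41/0x45 are the x32 counterparts without
   REX.W.  */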
3379 if (type == 0x8b)
3380 {
3381 /* movq */
3382 if (val == 0x4c)
3383 bfd_put_8 (output_bfd, 0x49,
3384 contents + roff - 3);
3385 else if (!ABI_64_P (output_bfd) && val == 0x44)
3386 bfd_put_8 (output_bfd, 0x41,
3387 contents + roff - 3);
3388 bfd_put_8 (output_bfd, 0xc7,
3389 contents + roff - 2);
3390 bfd_put_8 (output_bfd, 0xc0 | reg,
3391 contents + roff - 1);
3392 }
3393 else if (reg == 4)
3394 {
3395 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3396 is special */
3397 if (val == 0x4c)
3398 bfd_put_8 (output_bfd, 0x49,
3399 contents + roff - 3);
3400 else if (!ABI_64_P (output_bfd) && val == 0x44)
3401 bfd_put_8 (output_bfd, 0x41,
3402 contents + roff - 3);
3403 bfd_put_8 (output_bfd, 0x81,
3404 contents + roff - 2);
3405 bfd_put_8 (output_bfd, 0xc0 | reg,
3406 contents + roff - 1);
3407 }
3408 else
3409 {
3410 /* addq/addl -> leaq/leal */
3411 if (val == 0x4c)
3412 bfd_put_8 (output_bfd, 0x4d,
3413 contents + roff - 3);
3414 else if (!ABI_64_P (output_bfd) && val == 0x44)
3415 bfd_put_8 (output_bfd, 0x45,
3416 contents + roff - 3);
3417 bfd_put_8 (output_bfd, 0x8d,
3418 contents + roff - 2);
3419 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3420 contents + roff - 1);
3421 }
3422 bfd_put_32 (output_bfd,
3423 elf_x86_64_tpoff (info, relocation),
3424 contents + roff);
3425 continue;
3426 }
3427 else
3428 BFD_ASSERT (FALSE);
3429 }
3430
3431 if (htab->elf.sgot == NULL)
3432 abort ();
3433
3434 if (h != NULL)
3435 {
3436 off = h->got.offset;
3437 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3438 }
3439 else
3440 {
3441 if (local_got_offsets == NULL)
3442 abort ();
3443
3444 off = local_got_offsets[r_symndx];
3445 offplt = local_tlsdesc_gotents[r_symndx];
3446 }
3447
3448 if ((off & 1) != 0)
3449 off &= ~1;
3450 else
3451 {
3452 Elf_Internal_Rela outrel;
3453 int dr_type, indx;
3454 asection *sreloc;
3455
3456 if (htab->elf.srelgot == NULL)
3457 abort ();
3458
3459 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3460
3461 if (GOT_TLS_GDESC_P (tls_type))
3462 {
3463 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3464 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3465 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3466 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3467 + htab->elf.sgotplt->output_offset
3468 + offplt
3469 + htab->sgotplt_jump_table_size);
3470 sreloc = htab->elf.srelplt;
3471 if (indx == 0)
3472 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3473 else
3474 outrel.r_addend = 0;
3475 elf_append_rela (output_bfd, sreloc, &outrel);
3476 }
3477
3478 sreloc = htab->elf.srelgot;
3479
3480 outrel.r_offset = (htab->elf.sgot->output_section->vma
3481 + htab->elf.sgot->output_offset + off);
3482
3483 if (GOT_TLS_GD_P (tls_type))
3484 dr_type = R_X86_64_DTPMOD64;
3485 else if (GOT_TLS_GDESC_P (tls_type))
3486 goto dr_done;
3487 else
3488 dr_type = R_X86_64_TPOFF64;
3489
3490 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3491 outrel.r_addend = 0;
3492 if ((dr_type == R_X86_64_TPOFF64
3493 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3494 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3495 outrel.r_info = htab->r_info (indx, dr_type);
3496
3497 elf_append_rela (output_bfd, sreloc, &outrel);
3498
3499 if (GOT_TLS_GD_P (tls_type))
3500 {
3501 if (indx == 0)
3502 {
3503 BFD_ASSERT (! unresolved_reloc);
3504 bfd_put_64 (output_bfd,
3505 relocation - _bfd_x86_elf_dtpoff_base (info),
3506 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3507 }
3508 else
3509 {
3510 bfd_put_64 (output_bfd, 0,
3511 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3512 outrel.r_info = htab->r_info (indx,
3513 R_X86_64_DTPOFF64);
3514 outrel.r_offset += GOT_ENTRY_SIZE;
3515 elf_append_rela (output_bfd, sreloc,
3516 &outrel);
3517 }
3518 }
3519
3520 dr_done:
3521 if (h != NULL)
3522 h->got.offset |= 1;
3523 else
3524 local_got_offsets[r_symndx] |= 1;
3525 }
3526
3527 if (off >= (bfd_vma) -2
3528 && ! GOT_TLS_GDESC_P (tls_type))
3529 abort ();
3530 if (r_type_tls == r_type)
3531 {
3532 if (r_type == R_X86_64_GOTPC32_TLSDESC
3533 || r_type == R_X86_64_TLSDESC_CALL)
3534 relocation = htab->elf.sgotplt->output_section->vma
3535 + htab->elf.sgotplt->output_offset
3536 + offplt + htab->sgotplt_jump_table_size;
3537 else
3538 relocation = htab->elf.sgot->output_section->vma
3539 + htab->elf.sgot->output_offset + off;
3540 unresolved_reloc = FALSE;
3541 }
3542 else
3543 {
3544 bfd_vma roff = rel->r_offset;
3545
3546 if (r_type == R_X86_64_TLSGD)
3547 {
3548 /* GD->IE transition. For 64bit, change
3549 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3550 .word 0x6666; rex64; call __tls_get_addr@PLT
3551 or
3552 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3553 .byte 0x66; rex64
3554 call *__tls_get_addr@GOTPCREL(%rip)
3555 which may be converted to
3556 addr32 call __tls_get_addr
3557 into:
3558 movq %fs:0, %rax
3559 addq foo@gottpoff(%rip), %rax
3560 For 32bit, change
3561 leaq foo@tlsgd(%rip), %rdi
3562 .word 0x6666; rex64; call __tls_get_addr@PLT
3563 or
3564 leaq foo@tlsgd(%rip), %rdi
3565 .byte 0x66; rex64;
3566 call *__tls_get_addr@GOTPCREL(%rip)
3567 which may be converted to
3568 addr32 call __tls_get_addr
3569 into:
3570 movl %fs:0, %eax
3571 addq foo@gottpoff(%rip), %rax
3572 For largepic, change:
3573 leaq foo@tlsgd(%rip), %rdi
3574 movabsq $__tls_get_addr@pltoff, %rax
3575 addq %r15, %rax
3576 call *%rax
3577 into:
3578 movq %fs:0, %rax
3579 addq foo@gottpoff(%rax), %rax
3580 nopw 0x0(%rax,%rax,1) */
3581 int largepic = 0;
3582 if (ABI_64_P (output_bfd))
3583 {
3584 if (contents[roff + 5] == 0xb8)
3585 {
3586 memcpy (contents + roff - 3,
3587 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3588 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3589 largepic = 1;
3590 }
3591 else
3592 memcpy (contents + roff - 4,
3593 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3594 16);
3595 }
3596 else
3597 memcpy (contents + roff - 3,
3598 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3599 15);
3600
3601 relocation = (htab->elf.sgot->output_section->vma
3602 + htab->elf.sgot->output_offset + off
3603 - roff
3604 - largepic
3605 - input_section->output_section->vma
3606 - input_section->output_offset
3607 - 12);
3608 bfd_put_32 (output_bfd, relocation,
3609 contents + roff + 8 + largepic);
3610 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3611 rel++;
3612 wrel++;
3613 continue;
3614 }
3615 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3616 {
3617 /* GDesc -> IE transition.
3618 It's originally something like:
3619 leaq x@tlsdesc(%rip), %rax
3620
3621 Change it to:
3622 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3623
3624 /* Now modify the instruction as appropriate. To
3625 turn a leaq into a movq in the form we use it, it
3626 suffices to change the second byte from 0x8d to
3627 0x8b. */
3628 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3629
3630 bfd_put_32 (output_bfd,
3631 htab->elf.sgot->output_section->vma
3632 + htab->elf.sgot->output_offset + off
3633 - rel->r_offset
3634 - input_section->output_section->vma
3635 - input_section->output_offset
3636 - 4,
3637 contents + roff);
3638 continue;
3639 }
3640 else if (r_type == R_X86_64_TLSDESC_CALL)
3641 {
3642 /* GDesc -> IE transition.
3643 It's originally:
3644 call *(%rax)
3645
3646 Change it to:
3647 xchg %ax, %ax. */
3648
3649 bfd_put_8 (output_bfd, 0x66, contents + roff);
3650 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3651 continue;
3652 }
3653 else
3654 BFD_ASSERT (FALSE);
3655 }
3656 break;
3657
3658 case R_X86_64_TLSLD:
3659 if (! elf_x86_64_tls_transition (info, input_bfd,
3660 input_section, contents,
3661 symtab_hdr, sym_hashes,
3662 &r_type, GOT_UNKNOWN, rel,
3663 relend, h, r_symndx, TRUE))
3664 return FALSE;
3665
3666 if (r_type != R_X86_64_TLSLD)
3667 {
3668 /* LD->LE transition:
3669 leaq foo@tlsld(%rip), %rdi
3670 call __tls_get_addr@PLT
3671 For 64bit, we change it into:
3672 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3673 For 32bit, we change it into:
3674 nopl 0x0(%rax); movl %fs:0, %eax
3675 Or
3676 leaq foo@tlsld(%rip), %rdi;
3677 call *__tls_get_addr@GOTPCREL(%rip)
3678 which may be converted to
3679 addr32 call __tls_get_addr
3680 For 64bit, we change it into:
3681 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3682 For 32bit, we change it into:
3683 nopw 0x0(%rax); movl %fs:0, %eax
3684 For largepic, change:
3685 leaq foo@tlsgd(%rip), %rdi
3686 movabsq $__tls_get_addr@pltoff, %rax
3687 addq %rbx, %rax
3688 call *%rax
3689 into
3690 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3691 movq %fs:0, %rax */
3692
3693 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
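/* The bytes after the leaq identify which call sequence was used:
   0xb8 at rel->r_offset + 5 is the largepic movabsq, 0xff or 0x67 at
   rel->r_offset + 4 is the indirect or addr32-prefixed call, anything
   else is the direct call; pad the replacement to the matching length. */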
3694 if (ABI_64_P (output_bfd))
3695 {
3696 if (contents[rel->r_offset + 5] == 0xb8)
3697 memcpy (contents + rel->r_offset - 3,
3698 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3699 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3700 else if (contents[rel->r_offset + 4] == 0xff
3701 || contents[rel->r_offset + 4] == 0x67)
3702 memcpy (contents + rel->r_offset - 3,
3703 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3704 13);
3705 else
3706 memcpy (contents + rel->r_offset - 3,
3707 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3708 }
3709 else
3710 {
3711 if (contents[rel->r_offset + 4] == 0xff)
3712 memcpy (contents + rel->r_offset - 3,
3713 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3714 13);
3715 else
3716 memcpy (contents + rel->r_offset - 3,
3717 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3718 }
3719 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3720 and R_X86_64_PLTOFF64. */
3721 rel++;
3722 wrel++;
3723 continue;
3724 }
3725
3726 if (htab->elf.sgot == NULL)
3727 abort ();
3728
3729 off = htab->tls_ld_or_ldm_got.offset;
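/* The low bit of the saved offset records that the GOT entry and its
   R_X86_64_DTPMOD64 relocation have already been emitted (see the
   "|= 1" below); mask it off to recover the real offset. */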
3730 if (off & 1)
3731 off &= ~1;
3732 else
3733 {
3734 Elf_Internal_Rela outrel;
3735
3736 if (htab->elf.srelgot == NULL)
3737 abort ();
3738
3739 outrel.r_offset = (htab->elf.sgot->output_section->vma
3740 + htab->elf.sgot->output_offset + off);
3741
3742 bfd_put_64 (output_bfd, 0,
3743 htab->elf.sgot->contents + off);
3744 bfd_put_64 (output_bfd, 0,
3745 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3746 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3747 outrel.r_addend = 0;
3748 elf_append_rela (output_bfd, htab->elf.srelgot,
3749 &outrel);
3750 htab->tls_ld_or_ldm_got.offset |= 1;
3751 }
3752 relocation = htab->elf.sgot->output_section->vma
3753 + htab->elf.sgot->output_offset + off;
3754 unresolved_reloc = FALSE;
3755 break;
3756
3757 case R_X86_64_DTPOFF32:
3758 if (!bfd_link_executable (info)
3759 || (input_section->flags & SEC_CODE) == 0)
3760 relocation -= _bfd_x86_elf_dtpoff_base (info);
3761 else
3762 relocation = elf_x86_64_tpoff (info, relocation);
3763 break;
3764
3765 case R_X86_64_TPOFF32:
3766 case R_X86_64_TPOFF64:
3767 BFD_ASSERT (bfd_link_executable (info));
3768 relocation = elf_x86_64_tpoff (info, relocation);
3769 break;
3770
3771 case R_X86_64_DTPOFF64:
3772 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3773 relocation -= _bfd_x86_elf_dtpoff_base (info);
3774 break;
3775
3776 default:
3777 break;
3778 }
3779
3780 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3781 because such sections are not SEC_ALLOC and thus ld.so will
3782 not process them. */
3783 if (unresolved_reloc
3784 && !((input_section->flags & SEC_DEBUGGING) != 0
3785 && h->def_dynamic)
3786 && _bfd_elf_section_offset (output_bfd, info, input_section,
3787 rel->r_offset) != (bfd_vma) -1)
3788 {
3789 switch (r_type)
3790 {
3791 case R_X86_64_32S:
3792 sec = h->root.u.def.section;
3793 if ((info->nocopyreloc
3794 || (eh->def_protected
3795 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3796 && !(h->root.u.def.section->flags & SEC_CODE))
3797 return elf_x86_64_need_pic (info, input_bfd, input_section,
3798 h, NULL, NULL, howto);
3799 /* Fall through. */
3800
3801 default:
3802 _bfd_error_handler
3803 /* xgettext:c-format */
3804 (_("%pB(%pA+%#" PRIx64 "): "
3805 "unresolvable %s relocation against symbol `%s'"),
3806 input_bfd,
3807 input_section,
3808 (uint64_t) rel->r_offset,
3809 howto->name,
3810 h->root.root.string);
3811 return FALSE;
3812 }
3813 }
3814
3815 do_relocation:
3816 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3817 contents, rel->r_offset,
3818 relocation, rel->r_addend);
3819
3820 check_relocation_error:
3821 if (r != bfd_reloc_ok)
3822 {
3823 const char *name;
3824
3825 if (h != NULL)
3826 name = h->root.root.string;
3827 else
3828 {
3829 name = bfd_elf_string_from_elf_section (input_bfd,
3830 symtab_hdr->sh_link,
3831 sym->st_name);
3832 if (name == NULL)
3833 return FALSE;
3834 if (*name == '\0')
3835 name = bfd_section_name (input_bfd, sec);
3836 }
3837
3838 if (r == bfd_reloc_overflow)
3839 {
3840 if (converted_reloc)
3841 {
3842 info->callbacks->einfo
3843 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3844 return FALSE;
3845 }
3846 (*info->callbacks->reloc_overflow)
3847 (info, (h ? &h->root : NULL), name, howto->name,
3848 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3849 }
3850 else
3851 {
3852 _bfd_error_handler
3853 /* xgettext:c-format */
3854 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3855 input_bfd, input_section,
3856 (uint64_t) rel->r_offset, name, (int) r);
3857 return FALSE;
3858 }
3859 }
3860
3861 if (wrel != rel)
3862 *wrel = *rel;
3863 }
3864
3865 if (wrel != rel)
3866 {
3867 Elf_Internal_Shdr *rel_hdr;
3868 size_t deleted = rel - wrel;
3869
3870 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3871 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3872 if (rel_hdr->sh_size == 0)
3873 {
3874 /* It is too late to remove an empty reloc section. Leave
3875 one NONE reloc.
3876 ??? What is wrong with an empty section??? */
3877 rel_hdr->sh_size = rel_hdr->sh_entsize;
3878 deleted -= 1;
3879 }
3880 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3881 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3882 input_section->reloc_count -= deleted;
3883 }
3884
3885 return TRUE;
3886 }
3887
3888 /* Finish up dynamic symbol handling. We set the contents of various
3889 dynamic sections here. */
3890
3891 static bfd_boolean
3892 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3893 struct bfd_link_info *info,
3894 struct elf_link_hash_entry *h,
3895 Elf_Internal_Sym *sym)
3896 {
3897 struct elf_x86_link_hash_table *htab;
3898 bfd_boolean use_plt_second;
3899 struct elf_x86_link_hash_entry *eh;
3900 bfd_boolean local_undefweak;
3901
3902 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3903 if (htab == NULL)
3904 return FALSE;
3905
3906 /* Use the second PLT section only if there is a .plt section. */
3907 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3908
3909 eh = (struct elf_x86_link_hash_entry *) h;
3910 if (eh->no_finish_dynamic_symbol)
3911 abort ();
3912
3913 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3914 resolved undefined weak symbols in executable so that their
3915 references have value 0 at run-time. */
3916 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3917
3918 if (h->plt.offset != (bfd_vma) -1)
3919 {
3920 bfd_vma plt_index;
3921 bfd_vma got_offset, plt_offset;
3922 Elf_Internal_Rela rela;
3923 bfd_byte *loc;
3924 asection *plt, *gotplt, *relplt, *resolved_plt;
3925 const struct elf_backend_data *bed;
3926 bfd_vma plt_got_pcrel_offset;
3927
3928 /* When building a static executable, use .iplt, .igot.plt and
3929 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3930 if (htab->elf.splt != NULL)
3931 {
3932 plt = htab->elf.splt;
3933 gotplt = htab->elf.sgotplt;
3934 relplt = htab->elf.srelplt;
3935 }
3936 else
3937 {
3938 plt = htab->elf.iplt;
3939 gotplt = htab->elf.igotplt;
3940 relplt = htab->elf.irelplt;
3941 }
3942
3943 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3944
3945 /* Get the index in the procedure linkage table which
3946 corresponds to this symbol. This is the index of this symbol
3947 in all the symbols for which we are making plt entries. The
3948 first entry in the procedure linkage table is reserved.
3949
3950 Get the offset into the .got table of the entry that
3951 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3952 bytes. The first three are reserved for the dynamic linker.
3953
3954 For static executables, we don't reserve anything. */
3955
3956 if (plt == htab->elf.splt)
3957 {
3958 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3959 - htab->plt.has_plt0);
3960 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3961 }
3962 else
3963 {
3964 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3965 got_offset = got_offset * GOT_ENTRY_SIZE;
3966 }
3967
3968 /* Fill in the entry in the procedure linkage table. */
3969 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3970 htab->plt.plt_entry_size);
3971 if (use_plt_second)
3972 {
3973 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3974 htab->non_lazy_plt->plt_entry,
3975 htab->non_lazy_plt->plt_entry_size);
3976
3977 resolved_plt = htab->plt_second;
3978 plt_offset = eh->plt_second.offset;
3979 }
3980 else
3981 {
3982 resolved_plt = plt;
3983 plt_offset = h->plt.offset;
3984 }
3985
3986 /* Fill in the relocated fields of the PLT entry. */
3987
3988 /* Put in the offset to the GOT entry for the PC-relative instruction
3989 referring to it, subtracting the size of that instruction. */
3990 plt_got_pcrel_offset = (gotplt->output_section->vma
3991 + gotplt->output_offset
3992 + got_offset
3993 - resolved_plt->output_section->vma
3994 - resolved_plt->output_offset
3995 - plt_offset
3996 - htab->plt.plt_got_insn_size);
3997
3998 /* Check PC-relative offset overflow in PLT entry. */
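/* Adding 0x80000000 maps the signed 32-bit range onto [0, 0xffffffff],
   so any larger result cannot be encoded as a 32-bit displacement. */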
3999 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4000 /* xgettext:c-format */
4001 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4002 output_bfd, h->root.root.string);
4003
4004 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4005 (resolved_plt->contents + plt_offset
4006 + htab->plt.plt_got_offset));
4007
4008 /* Fill in the entry in the global offset table; initially this
4009 points to the second part of the PLT entry. Leave the entry
4010 as zero for an undefined weak symbol in PIE, since no PLT
4011 relocation is created against an undefined weak symbol in PIE. */
4012 if (!local_undefweak)
4013 {
4014 if (htab->plt.has_plt0)
4015 bfd_put_64 (output_bfd, (plt->output_section->vma
4016 + plt->output_offset
4017 + h->plt.offset
4018 + htab->lazy_plt->plt_lazy_offset),
4019 gotplt->contents + got_offset);
4020
4021 /* Fill in the entry in the .rela.plt section. */
4022 rela.r_offset = (gotplt->output_section->vma
4023 + gotplt->output_offset
4024 + got_offset);
4025 if (PLT_LOCAL_IFUNC_P (info, h))
4026 {
4027 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4028 h->root.root.string,
4029 h->root.u.def.section->owner);
4030
4031 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4032 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4033 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4034 rela.r_addend = (h->root.u.def.value
4035 + h->root.u.def.section->output_section->vma
4036 + h->root.u.def.section->output_offset);
4037 /* R_X86_64_IRELATIVE comes last. */
4038 plt_index = htab->next_irelative_index--;
4039 }
4040 else
4041 {
4042 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4043 rela.r_addend = 0;
4044 plt_index = htab->next_jump_slot_index++;
4045 }
4046
4047 /* Don't fill the second and third slots in the PLT entry for
4048 static executables or when PLT0 is not used. */
4049 if (plt == htab->elf.splt && htab->plt.has_plt0)
4050 {
4051 bfd_vma plt0_offset
4052 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4053
4054 /* Put relocation index. */
4055 bfd_put_32 (output_bfd, plt_index,
4056 (plt->contents + h->plt.offset
4057 + htab->lazy_plt->plt_reloc_offset));
4058
4059 /* Put in the offset for the jmp to .PLT0 and check for overflow. We
4060 don't check the relocation index for overflow since the branch
4061 displacement will overflow first. */
4062 if (plt0_offset > 0x80000000)
4063 /* xgettext:c-format */
4064 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4065 output_bfd, h->root.root.string);
4066 bfd_put_32 (output_bfd, - plt0_offset,
4067 (plt->contents + h->plt.offset
4068 + htab->lazy_plt->plt_plt_offset));
4069 }
4070
4071 bed = get_elf_backend_data (output_bfd);
4072 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4073 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4074 }
4075 }
4076 else if (eh->plt_got.offset != (bfd_vma) -1)
4077 {
4078 bfd_vma got_offset, plt_offset;
4079 asection *plt, *got;
4080 bfd_boolean got_after_plt;
4081 int32_t got_pcrel_offset;
4082
4083 /* Set the entry in the GOT procedure linkage table. */
4084 plt = htab->plt_got;
4085 got = htab->elf.sgot;
4086 got_offset = h->got.offset;
4087
4088 if (got_offset == (bfd_vma) -1
4089 || (h->type == STT_GNU_IFUNC && h->def_regular)
4090 || plt == NULL
4091 || got == NULL)
4092 abort ();
4093
4094 /* Use the non-lazy PLT entry template for the GOT PLT since they
4095 are identical. */
4096 /* Fill in the entry in the GOT procedure linkage table. */
4097 plt_offset = eh->plt_got.offset;
4098 memcpy (plt->contents + plt_offset,
4099 htab->non_lazy_plt->plt_entry,
4100 htab->non_lazy_plt->plt_entry_size);
4101
4102 /* Put in the offset to the GOT entry for the PC-relative instruction
4103 referring to it, subtracting the size of that instruction. */
4104 got_pcrel_offset = (got->output_section->vma
4105 + got->output_offset
4106 + got_offset
4107 - plt->output_section->vma
4108 - plt->output_offset
4109 - plt_offset
4110 - htab->non_lazy_plt->plt_got_insn_size);
4111
4112 /* Check PC-relative offset overflow in GOT PLT entry. */
4113 got_after_plt = got->output_section->vma > plt->output_section->vma;
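/* The displacement is only valid if its sign matches the relative
   placement of the sections; a sign mismatch means the 32-bit value
   wrapped around. */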
4114 if ((got_after_plt && got_pcrel_offset < 0)
4115 || (!got_after_plt && got_pcrel_offset > 0))
4116 /* xgettext:c-format */
4117 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4118 output_bfd, h->root.root.string);
4119
4120 bfd_put_32 (output_bfd, got_pcrel_offset,
4121 (plt->contents + plt_offset
4122 + htab->non_lazy_plt->plt_got_offset));
4123 }
4124
4125 if (!local_undefweak
4126 && !h->def_regular
4127 && (h->plt.offset != (bfd_vma) -1
4128 || eh->plt_got.offset != (bfd_vma) -1))
4129 {
4130 /* Mark the symbol as undefined, rather than as defined in
4131 the .plt section. Leave the value if there were any
4132 relocations where pointer equality matters (this is a clue
4133 for the dynamic linker, to make function pointer
4134 comparisons work between an application and shared
4135 library), otherwise set it to zero. If a function is only
4136 called from a binary, there is no need to slow down
4137 shared libraries because of that. */
4138 sym->st_shndx = SHN_UNDEF;
4139 if (!h->pointer_equality_needed)
4140 sym->st_value = 0;
4141 }
4142
4143 /* Don't generate dynamic GOT relocation against undefined weak
4144 symbol in executable. */
4145 if (h->got.offset != (bfd_vma) -1
4146 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4147 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4148 && !local_undefweak)
4149 {
4150 Elf_Internal_Rela rela;
4151 asection *relgot = htab->elf.srelgot;
4152
4153 /* This symbol has an entry in the global offset table. Set it
4154 up. */
4155 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4156 abort ();
4157
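/* The low bit of h->got.offset is a flag noting that relocate_section
   has already initialized this GOT entry; mask it off to get the real
   offset. */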
4158 rela.r_offset = (htab->elf.sgot->output_section->vma
4159 + htab->elf.sgot->output_offset
4160 + (h->got.offset &~ (bfd_vma) 1));
4161
4162 /* If this is a static link, or it is a -Bsymbolic link and the
4163 symbol is defined locally or was forced to be local because
4164 of a version file, we just want to emit a RELATIVE reloc.
4165 The entry in the global offset table will already have been
4166 initialized in the relocate_section function. */
4167 if (h->def_regular
4168 && h->type == STT_GNU_IFUNC)
4169 {
4170 if (h->plt.offset == (bfd_vma) -1)
4171 {
4172 /* STT_GNU_IFUNC is referenced without PLT. */
4173 if (htab->elf.splt == NULL)
4174 {
4175 /* Use the .rel[a].iplt section to store .got relocations
4176 in a static executable. */
4177 relgot = htab->elf.irelplt;
4178 }
4179 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4180 {
4181 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4182 h->root.root.string,
4183 h->root.u.def.section->owner);
4184
4185 rela.r_info = htab->r_info (0,
4186 R_X86_64_IRELATIVE);
4187 rela.r_addend = (h->root.u.def.value
4188 + h->root.u.def.section->output_section->vma
4189 + h->root.u.def.section->output_offset);
4190 }
4191 else
4192 goto do_glob_dat;
4193 }
4194 else if (bfd_link_pic (info))
4195 {
4196 /* Generate R_X86_64_GLOB_DAT. */
4197 goto do_glob_dat;
4198 }
4199 else
4200 {
4201 asection *plt;
4202 bfd_vma plt_offset;
4203
4204 if (!h->pointer_equality_needed)
4205 abort ();
4206
4207 /* For a non-shared object, we can't use .got.plt, which
4208 contains the real function address, if we need pointer
4209 equality. We load the GOT entry with the PLT entry instead. */
4210 if (htab->plt_second != NULL)
4211 {
4212 plt = htab->plt_second;
4213 plt_offset = eh->plt_second.offset;
4214 }
4215 else
4216 {
4217 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4218 plt_offset = h->plt.offset;
4219 }
4220 bfd_put_64 (output_bfd, (plt->output_section->vma
4221 + plt->output_offset
4222 + plt_offset),
4223 htab->elf.sgot->contents + h->got.offset);
4224 return TRUE;
4225 }
4226 }
4227 else if (bfd_link_pic (info)
4228 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4229 {
4230 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4231 return FALSE;
4232 BFD_ASSERT((h->got.offset & 1) != 0);
4233 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4234 rela.r_addend = (h->root.u.def.value
4235 + h->root.u.def.section->output_section->vma
4236 + h->root.u.def.section->output_offset);
4237 }
4238 else
4239 {
4240 BFD_ASSERT((h->got.offset & 1) == 0);
4241 do_glob_dat:
4242 bfd_put_64 (output_bfd, (bfd_vma) 0,
4243 htab->elf.sgot->contents + h->got.offset);
4244 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4245 rela.r_addend = 0;
4246 }
4247
4248 elf_append_rela (output_bfd, relgot, &rela);
4249 }
4250
4251 if (h->needs_copy)
4252 {
4253 Elf_Internal_Rela rela;
4254 asection *s;
4255
4256 /* This symbol needs a copy reloc. Set it up. */
4257 VERIFY_COPY_RELOC (h, htab)
4258
4259 rela.r_offset = (h->root.u.def.value
4260 + h->root.u.def.section->output_section->vma
4261 + h->root.u.def.section->output_offset);
4262 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4263 rela.r_addend = 0;
4264 if (h->root.u.def.section == htab->elf.sdynrelro)
4265 s = htab->elf.sreldynrelro;
4266 else
4267 s = htab->elf.srelbss;
4268 elf_append_rela (output_bfd, s, &rela);
4269 }
4270
4271 return TRUE;
4272 }
4273
4274 /* Finish up local dynamic symbol handling. We set the contents of
4275 various dynamic sections here. */
4276
4277 static bfd_boolean
4278 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4279 {
4280 struct elf_link_hash_entry *h
4281 = (struct elf_link_hash_entry *) *slot;
4282 struct bfd_link_info *info
4283 = (struct bfd_link_info *) inf;
4284
4285 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4286 info, h, NULL);
4287 }
4288
4289 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4290 here since an undefined weak symbol may not be dynamic, in which case
4291 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4292
4293 static bfd_boolean
4294 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4295 void *inf)
4296 {
4297 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4298 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4299
4300 if (h->root.type != bfd_link_hash_undefweak
4301 || h->dynindx != -1)
4302 return TRUE;
4303
4304 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4305 info, h, NULL);
4306 }
4307
4308 /* Used to decide how to sort relocs in an optimal manner for the
4309 dynamic linker, before writing them out. */
4310
4311 static enum elf_reloc_type_class
4312 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4313 const asection *rel_sec ATTRIBUTE_UNUSED,
4314 const Elf_Internal_Rela *rela)
4315 {
4316 bfd *abfd = info->output_bfd;
4317 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4318 struct elf_x86_link_hash_table *htab
4319 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4320
4321 if (htab->elf.dynsym != NULL
4322 && htab->elf.dynsym->contents != NULL)
4323 {
4324 /* Check relocation against STT_GNU_IFUNC symbol if there are
4325 dynamic symbols. */
4326 unsigned long r_symndx = htab->r_sym (rela->r_info);
4327 if (r_symndx != STN_UNDEF)
4328 {
4329 Elf_Internal_Sym sym;
4330 if (!bed->s->swap_symbol_in (abfd,
4331 (htab->elf.dynsym->contents
4332 + r_symndx * bed->s->sizeof_sym),
4333 0, &sym))
4334 abort ();
4335
4336 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4337 return reloc_class_ifunc;
4338 }
4339 }
4340
4341 switch ((int) ELF32_R_TYPE (rela->r_info))
4342 {
4343 case R_X86_64_IRELATIVE:
4344 return reloc_class_ifunc;
4345 case R_X86_64_RELATIVE:
4346 case R_X86_64_RELATIVE64:
4347 return reloc_class_relative;
4348 case R_X86_64_JUMP_SLOT:
4349 return reloc_class_plt;
4350 case R_X86_64_COPY:
4351 return reloc_class_copy;
4352 default:
4353 return reloc_class_normal;
4354 }
4355 }
4356
4357 /* Finish up the dynamic sections. */
4358
4359 static bfd_boolean
4360 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4361 struct bfd_link_info *info)
4362 {
4363 struct elf_x86_link_hash_table *htab;
4364
4365 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4366 if (htab == NULL)
4367 return FALSE;
4368
4369 if (! htab->elf.dynamic_sections_created)
4370 return TRUE;
4371
4372 if (htab->elf.splt && htab->elf.splt->size > 0)
4373 {
4374 elf_section_data (htab->elf.splt->output_section)
4375 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4376
4377 if (htab->plt.has_plt0)
4378 {
4379 /* Fill in the special first entry in the procedure linkage
4380 table. */
4381 memcpy (htab->elf.splt->contents,
4382 htab->lazy_plt->plt0_entry,
4383 htab->lazy_plt->plt0_entry_size);
4384 /* Add the offset for pushq GOT+8(%rip); since the instruction
4385 uses 6 bytes, subtract this value. */
4386 bfd_put_32 (output_bfd,
4387 (htab->elf.sgotplt->output_section->vma
4388 + htab->elf.sgotplt->output_offset
4389 + 8
4390 - htab->elf.splt->output_section->vma
4391 - htab->elf.splt->output_offset
4392 - 6),
4393 (htab->elf.splt->contents
4394 + htab->lazy_plt->plt0_got1_offset));
4395 /* Add offset for the PC-relative instruction accessing
4396 GOT+16, subtracting the offset to the end of that
4397 instruction. */
4398 bfd_put_32 (output_bfd,
4399 (htab->elf.sgotplt->output_section->vma
4400 + htab->elf.sgotplt->output_offset
4401 + 16
4402 - htab->elf.splt->output_section->vma
4403 - htab->elf.splt->output_offset
4404 - htab->lazy_plt->plt0_got2_insn_end),
4405 (htab->elf.splt->contents
4406 + htab->lazy_plt->plt0_got2_offset));
4407 }
4408
4409 if (htab->tlsdesc_plt)
4410 {
4411 /* The TLSDESC entry in a lazy procedure linkage table. */
4412 static const bfd_byte tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
4413 {
4414 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
4415 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
4416 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
4417 };
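/* The 8 and 16 displacement bytes above are only placeholders; they are
   overwritten below with the real PC-relative offsets to GOT+8 and to
   the GOT slot at tlsdesc_got. */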
4418
4419 bfd_put_64 (output_bfd, (bfd_vma) 0,
4420 htab->elf.sgot->contents + htab->tlsdesc_got);
4421
4422 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4423 tlsdesc_plt_entry, LAZY_PLT_ENTRY_SIZE);
4424
4425 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4426 bytes and the instruction uses 6 bytes, subtract these
4427 values. */
4428 bfd_put_32 (output_bfd,
4429 (htab->elf.sgotplt->output_section->vma
4430 + htab->elf.sgotplt->output_offset
4431 + 8
4432 - htab->elf.splt->output_section->vma
4433 - htab->elf.splt->output_offset
4434 - htab->tlsdesc_plt
4435 - 4 - 6),
4436 (htab->elf.splt->contents
4437 + htab->tlsdesc_plt
4438 + 4 + 2));
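/* 4 + 2 skips the 4-byte endbr64 and the 2 opcode bytes of pushq to
   reach its displacement field; 4 + 6 + 2 below likewise reaches the
   displacement field of the jmpq. */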
4439 /* Add offset for indirect branch via GOT+TDG, where TDG
4440 stands for htab->tlsdesc_got, subtracting the offset
4441 to the end of that instruction. */
4442 bfd_put_32 (output_bfd,
4443 (htab->elf.sgot->output_section->vma
4444 + htab->elf.sgot->output_offset
4445 + htab->tlsdesc_got
4446 - htab->elf.splt->output_section->vma
4447 - htab->elf.splt->output_offset
4448 - htab->tlsdesc_plt
4449 - 4 - 6 - 6),
4450 (htab->elf.splt->contents
4451 + htab->tlsdesc_plt + 4 + 6 + 2));
4452 }
4453 }
4454
4455 /* Fill PLT entries for undefined weak symbols in PIE. */
4456 if (bfd_link_pie (info))
4457 bfd_hash_traverse (&info->hash->table,
4458 elf_x86_64_pie_finish_undefweak_symbol,
4459 info);
4460
4461 return TRUE;
4462 }
4463
4464 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4465 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4466 It has to be done before elf_link_sort_relocs is called so that
4467 dynamic relocations are properly sorted. */
4468
4469 static bfd_boolean
4470 elf_x86_64_output_arch_local_syms
4471 (bfd *output_bfd ATTRIBUTE_UNUSED,
4472 struct bfd_link_info *info,
4473 void *flaginfo ATTRIBUTE_UNUSED,
4474 int (*func) (void *, const char *,
4475 Elf_Internal_Sym *,
4476 asection *,
4477 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4478 {
4479 struct elf_x86_link_hash_table *htab
4480 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4481 if (htab == NULL)
4482 return FALSE;
4483
4484 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4485 htab_traverse (htab->loc_hash_table,
4486 elf_x86_64_finish_local_dynamic_symbol,
4487 info);
4488
4489 return TRUE;
4490 }
4491
4492 /* Forward declaration. */
4493 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4494
4495 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4496 dynamic relocations. */
4497
4498 static long
4499 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4500 long symcount ATTRIBUTE_UNUSED,
4501 asymbol **syms ATTRIBUTE_UNUSED,
4502 long dynsymcount,
4503 asymbol **dynsyms,
4504 asymbol **ret)
4505 {
4506 long count, i, n;
4507 int j;
4508 bfd_byte *plt_contents;
4509 long relsize;
4510 const struct elf_x86_lazy_plt_layout *lazy_plt;
4511 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4512 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4513 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4514 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4515 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4516 asection *plt;
4517 enum elf_x86_plt_type plt_type;
4518 struct elf_x86_plt plts[] =
4519 {
4520 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4521 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4522 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4523 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4524 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4525 };
4526
4527 *ret = NULL;
4528
4529 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4530 return 0;
4531
4532 if (dynsymcount <= 0)
4533 return 0;
4534
4535 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4536 if (relsize <= 0)
4537 return -1;
4538
4539 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4540 {
4541 lazy_plt = &elf_x86_64_lazy_plt;
4542 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4543 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4544 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4545 if (ABI_64_P (abfd))
4546 {
4547 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4548 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4549 }
4550 else
4551 {
4552 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4553 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4554 }
4555 }
4556 else
4557 {
4558 lazy_plt = &elf_x86_64_nacl_plt;
4559 non_lazy_plt = NULL;
4560 lazy_bnd_plt = NULL;
4561 non_lazy_bnd_plt = NULL;
4562 lazy_ibt_plt = NULL;
4563 non_lazy_ibt_plt = NULL;
4564 }
4565
4566 count = 0;
4567 for (j = 0; plts[j].name != NULL; j++)
4568 {
4569 plt = bfd_get_section_by_name (abfd, plts[j].name);
4570 if (plt == NULL || plt->size == 0)
4571 continue;
4572
4573 /* Get the PLT section contents. */
4574 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4575 if (plt_contents == NULL)
4576 break;
4577 if (!bfd_get_section_contents (abfd, (asection *) plt,
4578 plt_contents, 0, plt->size))
4579 {
4580 free (plt_contents);
4581 break;
4582 }
4583
4584 /* Check what kind of PLT it is. */
4585 plt_type = plt_unknown;
4586 if (plts[j].type == plt_unknown
4587 && (plt->size >= (lazy_plt->plt_entry_size
4588 + lazy_plt->plt_entry_size)))
4589 {
4590 /* Match lazy PLT first. Need to check the first two
4591 instructions. */
4592 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4593 lazy_plt->plt0_got1_offset) == 0)
4594 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4595 2) == 0))
4596 plt_type = plt_lazy;
4597 else if (lazy_bnd_plt != NULL
4598 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4599 lazy_bnd_plt->plt0_got1_offset) == 0)
4600 && (memcmp (plt_contents + 6,
4601 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4602 {
4603 plt_type = plt_lazy | plt_second;
4604 /* The first entry in the lazy IBT PLT is the same as in the
4605 lazy BND PLT. */
4606 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4607 lazy_ibt_plt->plt_entry,
4608 lazy_ibt_plt->plt_got_offset) == 0))
4609 lazy_plt = lazy_ibt_plt;
4610 else
4611 lazy_plt = lazy_bnd_plt;
4612 }
4613 }
4614
4615 if (non_lazy_plt != NULL
4616 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4617 && plt->size >= non_lazy_plt->plt_entry_size)
4618 {
4619 /* Match non-lazy PLT. */
4620 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4621 non_lazy_plt->plt_got_offset) == 0)
4622 plt_type = plt_non_lazy;
4623 }
4624
4625 if (plt_type == plt_unknown || plt_type == plt_second)
4626 {
4627 if (non_lazy_bnd_plt != NULL
4628 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4629 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4630 non_lazy_bnd_plt->plt_got_offset) == 0))
4631 {
4632 /* Match BND PLT. */
4633 plt_type = plt_second;
4634 non_lazy_plt = non_lazy_bnd_plt;
4635 }
4636 else if (non_lazy_ibt_plt != NULL
4637 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4638 && (memcmp (plt_contents,
4639 non_lazy_ibt_plt->plt_entry,
4640 non_lazy_ibt_plt->plt_got_offset) == 0))
4641 {
4642 /* Match IBT PLT. */
4643 plt_type = plt_second;
4644 non_lazy_plt = non_lazy_ibt_plt;
4645 }
4646 }
4647
4648 if (plt_type == plt_unknown)
4649 {
4650 free (plt_contents);
4651 continue;
4652 }
4653
4654 plts[j].sec = plt;
4655 plts[j].type = plt_type;
4656
4657 if ((plt_type & plt_lazy))
4658 {
4659 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4660 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4661 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4662 /* Skip PLT0 in lazy PLT. */
4663 i = 1;
4664 }
4665 else
4666 {
4667 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4668 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4669 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4670 i = 0;
4671 }
4672
4673 /* Skip lazy PLT when the second PLT is used. */
4674 if (plt_type == (plt_lazy | plt_second))
4675 plts[j].count = 0;
4676 else
4677 {
4678 n = plt->size / plts[j].plt_entry_size;
4679 plts[j].count = n;
4680 count += n - i;
4681 }
4682
4683 plts[j].contents = plt_contents;
4684 }
4685
4686 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4687 (bfd_vma) 0, plts, dynsyms,
4688 ret);
4689 }
4690
4691 /* Handle an x86-64 specific section when reading an object file. This
4692 is called when elfcode.h finds a section with an unknown type. */
4693
4694 static bfd_boolean
4695 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4696 const char *name, int shindex)
4697 {
4698 if (hdr->sh_type != SHT_X86_64_UNWIND)
4699 return FALSE;
4700
4701 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4702 return FALSE;
4703
4704 return TRUE;
4705 }
4706
4707 /* Hook called by the linker routine which adds symbols from an object
4708 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4709 of .bss. */
4710
4711 static bfd_boolean
4712 elf_x86_64_add_symbol_hook (bfd *abfd,
4713 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4714 Elf_Internal_Sym *sym,
4715 const char **namep ATTRIBUTE_UNUSED,
4716 flagword *flagsp ATTRIBUTE_UNUSED,
4717 asection **secp,
4718 bfd_vma *valp)
4719 {
4720 asection *lcomm;
4721
4722 switch (sym->st_shndx)
4723 {
4724 case SHN_X86_64_LCOMMON:
4725 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4726 if (lcomm == NULL)
4727 {
4728 lcomm = bfd_make_section_with_flags (abfd,
4729 "LARGE_COMMON",
4730 (SEC_ALLOC
4731 | SEC_IS_COMMON
4732 | SEC_LINKER_CREATED));
4733 if (lcomm == NULL)
4734 return FALSE;
4735 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4736 }
4737 *secp = lcomm;
4738 *valp = sym->st_size;
4739 return TRUE;
4740 }
4741
4742 return TRUE;
4743 }
4744
4745
4746 /* Given a BFD section, try to locate the corresponding ELF section
4747 index. */
4748
4749 static bfd_boolean
4750 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4751 asection *sec, int *index_return)
4752 {
4753 if (sec == &_bfd_elf_large_com_section)
4754 {
4755 *index_return = SHN_X86_64_LCOMMON;
4756 return TRUE;
4757 }
4758 return FALSE;
4759 }
4760
4761 /* Process a symbol. */
4762
4763 static void
4764 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4765 asymbol *asym)
4766 {
4767 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4768
4769 switch (elfsym->internal_elf_sym.st_shndx)
4770 {
4771 case SHN_X86_64_LCOMMON:
4772 asym->section = &_bfd_elf_large_com_section;
4773 asym->value = elfsym->internal_elf_sym.st_size;
4774 /* Common symbol doesn't set BSF_GLOBAL. */
4775 asym->flags &= ~BSF_GLOBAL;
4776 break;
4777 }
4778 }
4779
4780 static bfd_boolean
4781 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4782 {
4783 return (sym->st_shndx == SHN_COMMON
4784 || sym->st_shndx == SHN_X86_64_LCOMMON);
4785 }
4786
4787 static unsigned int
4788 elf_x86_64_common_section_index (asection *sec)
4789 {
4790 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4791 return SHN_COMMON;
4792 else
4793 return SHN_X86_64_LCOMMON;
4794 }
4795
4796 static asection *
4797 elf_x86_64_common_section (asection *sec)
4798 {
4799 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4800 return bfd_com_section_ptr;
4801 else
4802 return &_bfd_elf_large_com_section;
4803 }
4804
4805 static bfd_boolean
4806 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4807 const Elf_Internal_Sym *sym,
4808 asection **psec,
4809 bfd_boolean newdef,
4810 bfd_boolean olddef,
4811 bfd *oldbfd,
4812 const asection *oldsec)
4813 {
4814 /* A normal common symbol and a large common symbol result in a
4815 normal common symbol. We turn the large common symbol into a
4816 normal one. */
4817 if (!olddef
4818 && h->root.type == bfd_link_hash_common
4819 && !newdef
4820 && bfd_is_com_section (*psec)
4821 && oldsec != *psec)
4822 {
4823 if (sym->st_shndx == SHN_COMMON
4824 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4825 {
4826 h->root.u.c.p->section
4827 = bfd_make_section_old_way (oldbfd, "COMMON");
4828 h->root.u.c.p->section->flags = SEC_ALLOC;
4829 }
4830 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4831 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4832 *psec = bfd_com_section_ptr;
4833 }
4834
4835 return TRUE;
4836 }
4837
4838 static int
4839 elf_x86_64_additional_program_headers (bfd *abfd,
4840 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4841 {
4842 asection *s;
4843 int count = 0;
4844
4845 /* Check to see if we need a large readonly segment. */
4846 s = bfd_get_section_by_name (abfd, ".lrodata");
4847 if (s && (s->flags & SEC_LOAD))
4848 count++;
4849
4850 /* Check to see if we need a large data segment. Since the .lbss
4851 section is placed right after the .bss section, there should be no
4852 need for a large data segment just because of .lbss. */
4853 s = bfd_get_section_by_name (abfd, ".ldata");
4854 if (s && (s->flags & SEC_LOAD))
4855 count++;
4856
4857 return count;
4858 }
4859
4860 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4861
4862 static bfd_boolean
4863 elf_x86_64_relocs_compatible (const bfd_target *input,
4864 const bfd_target *output)
4865 {
4866 return ((xvec_get_elf_backend_data (input)->s->elfclass
4867 == xvec_get_elf_backend_data (output)->s->elfclass)
4868 && _bfd_elf_relocs_compatible (input, output));
4869 }
4870
4871 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4872 with GNU properties if found. Otherwise, return NULL. */
4873
4874 static bfd *
4875 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4876 {
4877 struct elf_x86_init_table init_table;
4878
4879 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4880 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4881 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4882 != (int) R_X86_64_GNU_VTINHERIT)
4883 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4884 != (int) R_X86_64_GNU_VTENTRY))
4885 abort ();
4886
4887 /* This is unused for x86-64. */
4888 init_table.plt0_pad_byte = 0x90;
4889
4890 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4891 {
4892 if (info->bndplt)
4893 {
4894 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4895 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4896 }
4897 else
4898 {
4899 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4900 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4901 }
4902
4903 if (ABI_64_P (info->output_bfd))
4904 {
4905 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4906 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4907 }
4908 else
4909 {
4910 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4911 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4912 }
4913 }
4914 else
4915 {
4916 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4917 init_table.non_lazy_plt = NULL;
4918 init_table.lazy_ibt_plt = NULL;
4919 init_table.non_lazy_ibt_plt = NULL;
4920 }
4921
4922 if (ABI_64_P (info->output_bfd))
4923 {
4924 init_table.r_info = elf64_r_info;
4925 init_table.r_sym = elf64_r_sym;
4926 }
4927 else
4928 {
4929 init_table.r_info = elf32_r_info;
4930 init_table.r_sym = elf32_r_sym;
4931 }
4932
4933 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4934 }
4935
4936 static const struct bfd_elf_special_section
4937 elf_x86_64_special_sections[]=
4938 {
4939 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4940 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4941 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4942 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4943 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4944 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4945 { NULL, 0, 0, 0, 0 }
4946 };
4947
4948 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4949 #define TARGET_LITTLE_NAME "elf64-x86-64"
4950 #define ELF_ARCH bfd_arch_i386
4951 #define ELF_TARGET_ID X86_64_ELF_DATA
4952 #define ELF_MACHINE_CODE EM_X86_64
4953 #if DEFAULT_LD_Z_SEPARATE_CODE
4954 # define ELF_MAXPAGESIZE 0x1000
4955 #else
4956 # define ELF_MAXPAGESIZE 0x200000
4957 #endif
4958 #define ELF_MINPAGESIZE 0x1000
4959 #define ELF_COMMONPAGESIZE 0x1000
4960
4961 #define elf_backend_can_gc_sections 1
4962 #define elf_backend_can_refcount 1
4963 #define elf_backend_want_got_plt 1
4964 #define elf_backend_plt_readonly 1
4965 #define elf_backend_want_plt_sym 0
4966 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
4967 #define elf_backend_rela_normal 1
4968 #define elf_backend_plt_alignment 4
4969 #define elf_backend_extern_protected_data 1
4970 #define elf_backend_caches_rawsize 1
4971 #define elf_backend_dtrel_excludes_plt 1
4972 #define elf_backend_want_dynrelro 1
4973
4974 #define elf_info_to_howto elf_x86_64_info_to_howto
4975
4976 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4977 #define bfd_elf64_bfd_reloc_name_lookup \
4978 elf_x86_64_reloc_name_lookup
4979
4980 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4981 #define elf_backend_check_relocs elf_x86_64_check_relocs
4982 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4983 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4984 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4985 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4986 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4987 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4988 #ifdef CORE_HEADER
4989 #define elf_backend_write_core_note elf_x86_64_write_core_note
4990 #endif
4991 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4992 #define elf_backend_relocate_section elf_x86_64_relocate_section
4993 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4994 #define elf_backend_object_p elf64_x86_64_elf_object_p
4995 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4996
4997 #define elf_backend_section_from_shdr \
4998 elf_x86_64_section_from_shdr
4999
5000 #define elf_backend_section_from_bfd_section \
5001 elf_x86_64_elf_section_from_bfd_section
5002 #define elf_backend_add_symbol_hook \
5003 elf_x86_64_add_symbol_hook
5004 #define elf_backend_symbol_processing \
5005 elf_x86_64_symbol_processing
5006 #define elf_backend_common_section_index \
5007 elf_x86_64_common_section_index
5008 #define elf_backend_common_section \
5009 elf_x86_64_common_section
5010 #define elf_backend_common_definition \
5011 elf_x86_64_common_definition
5012 #define elf_backend_merge_symbol \
5013 elf_x86_64_merge_symbol
5014 #define elf_backend_special_sections \
5015 elf_x86_64_special_sections
5016 #define elf_backend_additional_program_headers \
5017 elf_x86_64_additional_program_headers
5018 #define elf_backend_setup_gnu_properties \
5019 elf_x86_64_link_setup_gnu_properties
5020 #define elf_backend_hide_symbol \
5021 _bfd_x86_elf_hide_symbol
5022
5023 #include "elf64-target.h"
5024
5025 /* CloudABI support. */
5026
5027 #undef TARGET_LITTLE_SYM
5028 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5029 #undef TARGET_LITTLE_NAME
5030 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5031
5032 #undef ELF_OSABI
5033 #define ELF_OSABI ELFOSABI_CLOUDABI
5034
5035 #undef elf64_bed
5036 #define elf64_bed elf64_x86_64_cloudabi_bed
5037
5038 #include "elf64-target.h"
5039
5040 /* FreeBSD support. */
5041
5042 #undef TARGET_LITTLE_SYM
5043 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5044 #undef TARGET_LITTLE_NAME
5045 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5046
5047 #undef ELF_OSABI
5048 #define ELF_OSABI ELFOSABI_FREEBSD
5049
5050 #undef elf64_bed
5051 #define elf64_bed elf64_x86_64_fbsd_bed
5052
5053 #include "elf64-target.h"
5054
5055 /* Solaris 2 support. */
5056
5057 #undef TARGET_LITTLE_SYM
5058 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5059 #undef TARGET_LITTLE_NAME
5060 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5061
5062 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5063 {
5064 is_solaris /* os */
5065 };
5066
5067 #undef elf_backend_arch_data
5068 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5069
5070 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5071 objects won't be recognized. */
5072 #undef ELF_OSABI
5073
5074 #undef elf64_bed
5075 #define elf64_bed elf64_x86_64_sol2_bed
5076
5077 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5078 boundary. */
5079 #undef elf_backend_static_tls_alignment
5080 #define elf_backend_static_tls_alignment 16
5081
5082 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5083
5084 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5085 File, p.63. */
5086 #undef elf_backend_want_plt_sym
5087 #define elf_backend_want_plt_sym 1
5088
5089 #undef elf_backend_strtab_flags
5090 #define elf_backend_strtab_flags SHF_STRINGS
5091
5092 static bfd_boolean
5093 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5094 bfd *obfd ATTRIBUTE_UNUSED,
5095 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5096 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5097 {
5098 /* PR 19938: FIXME: Need to add code for setting the sh_info
5099 and sh_link fields of Solaris specific section types. */
5100 return FALSE;
5101 }
5102
5103 #undef elf_backend_copy_special_section_fields
5104 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5105
5106 #include "elf64-target.h"
5107
5108 /* Native Client support. */
5109
5110 static bfd_boolean
5111 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5112 {
5113 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5114 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5115 return TRUE;
5116 }
5117
5118 #undef TARGET_LITTLE_SYM
5119 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5120 #undef TARGET_LITTLE_NAME
5121 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5122 #undef elf64_bed
5123 #define elf64_bed elf64_x86_64_nacl_bed
5124
5125 #undef ELF_MAXPAGESIZE
5126 #undef ELF_MINPAGESIZE
5127 #undef ELF_COMMONPAGESIZE
5128 #define ELF_MAXPAGESIZE 0x10000
5129 #define ELF_MINPAGESIZE 0x10000
5130 #define ELF_COMMONPAGESIZE 0x10000
5131
5132 /* Restore defaults. */
5133 #undef ELF_OSABI
5134 #undef elf_backend_static_tls_alignment
5135 #undef elf_backend_want_plt_sym
5136 #define elf_backend_want_plt_sym 0
5137 #undef elf_backend_strtab_flags
5138 #undef elf_backend_copy_special_section_fields
5139
5140 /* NaCl uses substantially different PLT entries for the same effects. */
5141
5142 #undef elf_backend_plt_alignment
5143 #define elf_backend_plt_alignment 5
5144 #define NACL_PLT_ENTRY_SIZE 64
5145 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5146
5147 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5148 {
5149 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5150 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5151 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5152 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5153 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5154
5155 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5156 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5157
5158 /* 32 bytes of nop to pad out to the standard size. */
5159 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5160 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5161 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5162 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5163 0x66, /* excess data16 prefix */
5164 0x90 /* nop */
5165 };
5166
5167 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5168 {
5169 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5170 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5171 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5172 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5173
5174 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5175 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5176 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5177
5178 /* Lazy GOT entries point here (32-byte aligned). */
5179 0x68, /* pushq immediate */
5180 0, 0, 0, 0, /* replaced with index into relocation table. */
5181 0xe9, /* jmp relative */
5182 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5183
5184 /* 22 bytes of nop to pad out to the standard size. */
5185 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5186 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5187 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5188 };
5189
5190 /* .eh_frame covering the .plt section. */
5191
5192 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5193 {
5194 #if (PLT_CIE_LENGTH != 20 \
5195 || PLT_FDE_LENGTH != 36 \
5196 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5197 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5198 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5199 #endif
5200 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5201 0, 0, 0, 0, /* CIE ID */
5202 1, /* CIE version */
5203 'z', 'R', 0, /* Augmentation string */
5204 1, /* Code alignment factor */
5205 0x78, /* Data alignment factor */
5206 16, /* Return address column */
5207 1, /* Augmentation size */
5208 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5209 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5210 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5211 DW_CFA_nop, DW_CFA_nop,
5212
5213 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5214 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5215 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5216 0, 0, 0, 0, /* .plt size goes here */
5217 0, /* Augmentation size */
5218 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5219 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5220 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5221 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5222 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5223 13, /* Block length */
5224 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5225 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5226 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5227 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5228 DW_CFA_nop, DW_CFA_nop
5229 };
5230
5231 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5232 {
5233 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5234 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5235 elf_x86_64_nacl_plt_entry, /* plt_entry */
5236 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5237 2, /* plt0_got1_offset */
5238 9, /* plt0_got2_offset */
5239 13, /* plt0_got2_insn_end */
5240 3, /* plt_got_offset */
5241 33, /* plt_reloc_offset */
5242 38, /* plt_plt_offset */
5243 7, /* plt_got_insn_size */
5244 42, /* plt_plt_insn_end */
5245 32, /* plt_lazy_offset */
5246 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5247 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5248 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5249 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5250 };
5251
5252 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5253 {
5254 is_nacl /* os */
5255 };
5256
5257 #undef elf_backend_arch_data
5258 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5259
5260 #undef elf_backend_object_p
5261 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5262 #undef elf_backend_modify_segment_map
5263 #define elf_backend_modify_segment_map nacl_modify_segment_map
5264 #undef elf_backend_modify_program_headers
5265 #define elf_backend_modify_program_headers nacl_modify_program_headers
5266 #undef elf_backend_final_write_processing
5267 #define elf_backend_final_write_processing nacl_final_write_processing
5268
5269 #include "elf64-target.h"
5270
5271 /* Native Client x32 support. */
5272
5273 static bfd_boolean
5274 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5275 {
5276 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5277 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5278 return TRUE;
5279 }
5280
5281 #undef TARGET_LITTLE_SYM
5282 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5283 #undef TARGET_LITTLE_NAME
5284 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5285 #undef elf32_bed
5286 #define elf32_bed elf32_x86_64_nacl_bed
5287
5288 #define bfd_elf32_bfd_reloc_type_lookup \
5289 elf_x86_64_reloc_type_lookup
5290 #define bfd_elf32_bfd_reloc_name_lookup \
5291 elf_x86_64_reloc_name_lookup
5292 #define bfd_elf32_get_synthetic_symtab \
5293 elf_x86_64_get_synthetic_symtab
5294
5295 #undef elf_backend_object_p
5296 #define elf_backend_object_p \
5297 elf32_x86_64_nacl_elf_object_p
5298
5299 #undef elf_backend_bfd_from_remote_memory
5300 #define elf_backend_bfd_from_remote_memory \
5301 _bfd_elf32_bfd_from_remote_memory
5302
5303 #undef elf_backend_size_info
5304 #define elf_backend_size_info \
5305 _bfd_elf32_size_info
5306
5307 #include "elf32-target.h"
5308
5309 /* Restore defaults. */
5310 #undef elf_backend_object_p
5311 #define elf_backend_object_p elf64_x86_64_elf_object_p
5312 #undef elf_backend_bfd_from_remote_memory
5313 #undef elf_backend_size_info
5314 #undef elf_backend_modify_segment_map
5315 #undef elf_backend_modify_program_headers
5316 #undef elf_backend_final_write_processing
5317
5318 /* Intel L1OM support. */
5319
5320 static bfd_boolean
5321 elf64_l1om_elf_object_p (bfd *abfd)
5322 {
5323 /* Set the right machine number for an L1OM elf64 file. */
5324 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5325 return TRUE;
5326 }
5327
5328 #undef TARGET_LITTLE_SYM
5329 #define TARGET_LITTLE_SYM l1om_elf64_vec
5330 #undef TARGET_LITTLE_NAME
5331 #define TARGET_LITTLE_NAME "elf64-l1om"
5332 #undef ELF_ARCH
5333 #define ELF_ARCH bfd_arch_l1om
5334
5335 #undef ELF_MACHINE_CODE
5336 #define ELF_MACHINE_CODE EM_L1OM
5337
5338 #undef ELF_OSABI
5339
5340 #undef elf64_bed
5341 #define elf64_bed elf64_l1om_bed
5342
5343 #undef elf_backend_object_p
5344 #define elf_backend_object_p elf64_l1om_elf_object_p
5345
5346 /* Restore defaults. */
5347 #undef ELF_MAXPAGESIZE
5348 #undef ELF_MINPAGESIZE
5349 #undef ELF_COMMONPAGESIZE
5350 #if DEFAULT_LD_Z_SEPARATE_CODE
5351 # define ELF_MAXPAGESIZE 0x1000
5352 #else
5353 # define ELF_MAXPAGESIZE 0x200000
5354 #endif
5355 #define ELF_MINPAGESIZE 0x1000
5356 #define ELF_COMMONPAGESIZE 0x1000
5357 #undef elf_backend_plt_alignment
5358 #define elf_backend_plt_alignment 4
5359 #undef elf_backend_arch_data
5360 #define elf_backend_arch_data &elf_x86_64_arch_bed
5361
5362 #include "elf64-target.h"
5363
5364 /* FreeBSD L1OM support. */
5365
5366 #undef TARGET_LITTLE_SYM
5367 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5368 #undef TARGET_LITTLE_NAME
5369 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5370
5371 #undef ELF_OSABI
5372 #define ELF_OSABI ELFOSABI_FREEBSD
5373
5374 #undef elf64_bed
5375 #define elf64_bed elf64_l1om_fbsd_bed
5376
5377 #include "elf64-target.h"
5378
5379 /* Intel K1OM support. */
5380
5381 static bfd_boolean
5382 elf64_k1om_elf_object_p (bfd *abfd)
5383 {
5384 /* Set the right machine number for a K1OM elf64 file. */
5385 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5386 return TRUE;
5387 }
5388
5389 #undef TARGET_LITTLE_SYM
5390 #define TARGET_LITTLE_SYM k1om_elf64_vec
5391 #undef TARGET_LITTLE_NAME
5392 #define TARGET_LITTLE_NAME "elf64-k1om"
5393 #undef ELF_ARCH
5394 #define ELF_ARCH bfd_arch_k1om
5395
5396 #undef ELF_MACHINE_CODE
5397 #define ELF_MACHINE_CODE EM_K1OM
5398
5399 #undef ELF_OSABI
5400
5401 #undef elf64_bed
5402 #define elf64_bed elf64_k1om_bed
5403
5404 #undef elf_backend_object_p
5405 #define elf_backend_object_p elf64_k1om_elf_object_p
5406
5407 #undef elf_backend_static_tls_alignment
5408
5409 #undef elf_backend_want_plt_sym
5410 #define elf_backend_want_plt_sym 0
5411
5412 #include "elf64-target.h"
5413
5414 /* FreeBSD K1OM support. */
5415
5416 #undef TARGET_LITTLE_SYM
5417 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5418 #undef TARGET_LITTLE_NAME
5419 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5420
5421 #undef ELF_OSABI
5422 #define ELF_OSABI ELFOSABI_FREEBSD
5423
5424 #undef elf64_bed
5425 #define elf64_bed elf64_k1om_fbsd_bed
5426
5427 #include "elf64-target.h"
5428
5429 /* 32bit x86-64 support. */
5430
5431 #undef TARGET_LITTLE_SYM
5432 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5433 #undef TARGET_LITTLE_NAME
5434 #define TARGET_LITTLE_NAME "elf32-x86-64"
5435 #undef elf32_bed
5436
5437 #undef ELF_ARCH
5438 #define ELF_ARCH bfd_arch_i386
5439
5440 #undef ELF_MACHINE_CODE
5441 #define ELF_MACHINE_CODE EM_X86_64
5442
5443 #undef ELF_OSABI
5444
5445 #undef elf_backend_object_p
5446 #define elf_backend_object_p \
5447 elf32_x86_64_elf_object_p
5448
5449 #undef elf_backend_bfd_from_remote_memory
5450 #define elf_backend_bfd_from_remote_memory \
5451 _bfd_elf32_bfd_from_remote_memory
5452
5453 #undef elf_backend_size_info
5454 #define elf_backend_size_info \
5455 _bfd_elf32_size_info
5456
5457 #include "elf32-target.h"