1 /* Renesas RX specific support for 32-bit ELF.
2 Copyright (C) 2008-2014 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
 18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
19
20 #include "sysdep.h"
21 #include "bfd.h"
22 #include "bfd_stdint.h"
23 #include "libbfd.h"
24 #include "elf-bfd.h"
25 #include "elf/rx.h"
26 #include "libiberty.h"
27
28 #define RX_OPCODE_BIG_ENDIAN 0
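/* When non-zero, the #if RX_OPCODE_BIG_ENDIAN branches below store
   immediates patched into opcode fields most-significant byte first.  */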
29
30 /* This is a meta-target that's used only with objcopy, to avoid the
31 endian-swap we would otherwise get. We check for this in
32 rx_elf_object_p(). */
33 const bfd_target bfd_elf32_rx_be_ns_vec;
34 const bfd_target bfd_elf32_rx_be_vec;
35
36 #ifdef DEBUG
37 char * rx_get_reloc (long);
38 void rx_dump_symtab (bfd *, void *, void *);
39 #endif
40
41 #define RXREL(n,sz,bit,shift,complain,pcrel) \
42 HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
43 bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
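/* For example, RXREL (DIR32, 2, 32, 0, signed, FALSE) expands to
   HOWTO (R_RX_DIR32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
          bfd_elf_generic_reloc, "R_RX_DIR32", FALSE, 0, ~0, FALSE),
   i.e. a 32-bit field, no right shift, signed overflow checking, not
   PC-relative.  */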
44
45 /* Note that the relocations around 0x7f are internal to this file;
46 feel free to move them as needed to avoid conflicts with published
47 relocation numbers. */
48
49 static reloc_howto_type rx_elf_howto_table [] =
50 {
51 RXREL (NONE, 0, 0, 0, dont, FALSE),
52 RXREL (DIR32, 2, 32, 0, signed, FALSE),
53 RXREL (DIR24S, 2, 24, 0, signed, FALSE),
54 RXREL (DIR16, 1, 16, 0, dont, FALSE),
55 RXREL (DIR16U, 1, 16, 0, unsigned, FALSE),
56 RXREL (DIR16S, 1, 16, 0, signed, FALSE),
57 RXREL (DIR8, 0, 8, 0, dont, FALSE),
58 RXREL (DIR8U, 0, 8, 0, unsigned, FALSE),
59 RXREL (DIR8S, 0, 8, 0, signed, FALSE),
60 RXREL (DIR24S_PCREL, 2, 24, 0, signed, TRUE),
61 RXREL (DIR16S_PCREL, 1, 16, 0, signed, TRUE),
62 RXREL (DIR8S_PCREL, 0, 8, 0, signed, TRUE),
63 RXREL (DIR16UL, 1, 16, 2, unsigned, FALSE),
64 RXREL (DIR16UW, 1, 16, 1, unsigned, FALSE),
65 RXREL (DIR8UL, 0, 8, 2, unsigned, FALSE),
66 RXREL (DIR8UW, 0, 8, 1, unsigned, FALSE),
67 RXREL (DIR32_REV, 1, 16, 0, dont, FALSE),
68 RXREL (DIR16_REV, 1, 16, 0, dont, FALSE),
69 RXREL (DIR3U_PCREL, 0, 3, 0, dont, TRUE),
70
71 EMPTY_HOWTO (0x13),
72 EMPTY_HOWTO (0x14),
73 EMPTY_HOWTO (0x15),
74 EMPTY_HOWTO (0x16),
75 EMPTY_HOWTO (0x17),
76 EMPTY_HOWTO (0x18),
77 EMPTY_HOWTO (0x19),
78 EMPTY_HOWTO (0x1a),
79 EMPTY_HOWTO (0x1b),
80 EMPTY_HOWTO (0x1c),
81 EMPTY_HOWTO (0x1d),
82 EMPTY_HOWTO (0x1e),
83 EMPTY_HOWTO (0x1f),
84
85 RXREL (RH_3_PCREL, 0, 3, 0, signed, TRUE),
86 RXREL (RH_16_OP, 1, 16, 0, signed, FALSE),
87 RXREL (RH_24_OP, 2, 24, 0, signed, FALSE),
88 RXREL (RH_32_OP, 2, 32, 0, signed, FALSE),
89 RXREL (RH_24_UNS, 2, 24, 0, unsigned, FALSE),
90 RXREL (RH_8_NEG, 0, 8, 0, signed, FALSE),
91 RXREL (RH_16_NEG, 1, 16, 0, signed, FALSE),
92 RXREL (RH_24_NEG, 2, 24, 0, signed, FALSE),
93 RXREL (RH_32_NEG, 2, 32, 0, signed, FALSE),
94 RXREL (RH_DIFF, 2, 32, 0, signed, FALSE),
95 RXREL (RH_GPRELB, 1, 16, 0, unsigned, FALSE),
96 RXREL (RH_GPRELW, 1, 16, 0, unsigned, FALSE),
97 RXREL (RH_GPRELL, 1, 16, 0, unsigned, FALSE),
98 RXREL (RH_RELAX, 0, 0, 0, dont, FALSE),
99
100 EMPTY_HOWTO (0x2e),
101 EMPTY_HOWTO (0x2f),
102 EMPTY_HOWTO (0x30),
103 EMPTY_HOWTO (0x31),
104 EMPTY_HOWTO (0x32),
105 EMPTY_HOWTO (0x33),
106 EMPTY_HOWTO (0x34),
107 EMPTY_HOWTO (0x35),
108 EMPTY_HOWTO (0x36),
109 EMPTY_HOWTO (0x37),
110 EMPTY_HOWTO (0x38),
111 EMPTY_HOWTO (0x39),
112 EMPTY_HOWTO (0x3a),
113 EMPTY_HOWTO (0x3b),
114 EMPTY_HOWTO (0x3c),
115 EMPTY_HOWTO (0x3d),
116 EMPTY_HOWTO (0x3e),
117 EMPTY_HOWTO (0x3f),
118 EMPTY_HOWTO (0x40),
119
120 RXREL (ABS32, 2, 32, 0, dont, FALSE),
121 RXREL (ABS24S, 2, 24, 0, signed, FALSE),
122 RXREL (ABS16, 1, 16, 0, dont, FALSE),
123 RXREL (ABS16U, 1, 16, 0, unsigned, FALSE),
124 RXREL (ABS16S, 1, 16, 0, signed, FALSE),
125 RXREL (ABS8, 0, 8, 0, dont, FALSE),
126 RXREL (ABS8U, 0, 8, 0, unsigned, FALSE),
127 RXREL (ABS8S, 0, 8, 0, signed, FALSE),
128 RXREL (ABS24S_PCREL, 2, 24, 0, signed, TRUE),
129 RXREL (ABS16S_PCREL, 1, 16, 0, signed, TRUE),
130 RXREL (ABS8S_PCREL, 0, 8, 0, signed, TRUE),
131 RXREL (ABS16UL, 1, 16, 0, unsigned, FALSE),
132 RXREL (ABS16UW, 1, 16, 0, unsigned, FALSE),
133 RXREL (ABS8UL, 0, 8, 0, unsigned, FALSE),
134 RXREL (ABS8UW, 0, 8, 0, unsigned, FALSE),
135 RXREL (ABS32_REV, 2, 32, 0, dont, FALSE),
136 RXREL (ABS16_REV, 1, 16, 0, dont, FALSE),
137
138 #define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
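/* True for the R_RX_ABS* relocs above, i.e. the relocs whose values
   are popped from the internal reloc stack in rx_elf_relocate_section
   below rather than taken directly from the reloc.  */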
139
140 EMPTY_HOWTO (0x52),
141 EMPTY_HOWTO (0x53),
142 EMPTY_HOWTO (0x54),
143 EMPTY_HOWTO (0x55),
144 EMPTY_HOWTO (0x56),
145 EMPTY_HOWTO (0x57),
146 EMPTY_HOWTO (0x58),
147 EMPTY_HOWTO (0x59),
148 EMPTY_HOWTO (0x5a),
149 EMPTY_HOWTO (0x5b),
150 EMPTY_HOWTO (0x5c),
151 EMPTY_HOWTO (0x5d),
152 EMPTY_HOWTO (0x5e),
153 EMPTY_HOWTO (0x5f),
154 EMPTY_HOWTO (0x60),
155 EMPTY_HOWTO (0x61),
156 EMPTY_HOWTO (0x62),
157 EMPTY_HOWTO (0x63),
158 EMPTY_HOWTO (0x64),
159 EMPTY_HOWTO (0x65),
160 EMPTY_HOWTO (0x66),
161 EMPTY_HOWTO (0x67),
162 EMPTY_HOWTO (0x68),
163 EMPTY_HOWTO (0x69),
164 EMPTY_HOWTO (0x6a),
165 EMPTY_HOWTO (0x6b),
166 EMPTY_HOWTO (0x6c),
167 EMPTY_HOWTO (0x6d),
168 EMPTY_HOWTO (0x6e),
169 EMPTY_HOWTO (0x6f),
170 EMPTY_HOWTO (0x70),
171 EMPTY_HOWTO (0x71),
172 EMPTY_HOWTO (0x72),
173 EMPTY_HOWTO (0x73),
174 EMPTY_HOWTO (0x74),
175 EMPTY_HOWTO (0x75),
176 EMPTY_HOWTO (0x76),
177 EMPTY_HOWTO (0x77),
178
179 /* These are internal. */
180 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12. */
181 /* ---- ---- 4--- 3210. */
182 #define R_RX_RH_ABS5p8B 0x78
183 RXREL (RH_ABS5p8B, 0, 0, 0, dont, FALSE),
184 #define R_RX_RH_ABS5p8W 0x79
185 RXREL (RH_ABS5p8W, 0, 0, 0, dont, FALSE),
186 #define R_RX_RH_ABS5p8L 0x7a
187 RXREL (RH_ABS5p8L, 0, 0, 0, dont, FALSE),
188 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12. */
189 /* ---- -432 1--- 0---. */
190 #define R_RX_RH_ABS5p5B 0x7b
191 RXREL (RH_ABS5p5B, 0, 0, 0, dont, FALSE),
192 #define R_RX_RH_ABS5p5W 0x7c
193 RXREL (RH_ABS5p5W, 0, 0, 0, dont, FALSE),
194 #define R_RX_RH_ABS5p5L 0x7d
195 RXREL (RH_ABS5p5L, 0, 0, 0, dont, FALSE),
196 /* A 4-bit unsigned immediate at bit position 8. */
197 #define R_RX_RH_UIMM4p8 0x7e
198 RXREL (RH_UIMM4p8, 0, 0, 0, dont, FALSE),
199 /* A 4-bit negative unsigned immediate at bit position 8. */
200 #define R_RX_RH_UNEG4p8 0x7f
201 RXREL (RH_UNEG4p8, 0, 0, 0, dont, FALSE),
202 /* End of internal relocs. */
203
204 RXREL (SYM, 2, 32, 0, dont, FALSE),
205 RXREL (OPneg, 2, 32, 0, dont, FALSE),
206 RXREL (OPadd, 2, 32, 0, dont, FALSE),
207 RXREL (OPsub, 2, 32, 0, dont, FALSE),
208 RXREL (OPmul, 2, 32, 0, dont, FALSE),
209 RXREL (OPdiv, 2, 32, 0, dont, FALSE),
210 RXREL (OPshla, 2, 32, 0, dont, FALSE),
211 RXREL (OPshra, 2, 32, 0, dont, FALSE),
212 RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
213 RXREL (OPscttop, 2, 32, 0, dont, FALSE),
214 RXREL (OPand, 2, 32, 0, dont, FALSE),
215 RXREL (OPor, 2, 32, 0, dont, FALSE),
216 RXREL (OPxor, 2, 32, 0, dont, FALSE),
217 RXREL (OPnot, 2, 32, 0, dont, FALSE),
218 RXREL (OPmod, 2, 32, 0, dont, FALSE),
219 RXREL (OPromtop, 2, 32, 0, dont, FALSE),
220 RXREL (OPramtop, 2, 32, 0, dont, FALSE)
221 };
222 \f
223 /* Map BFD reloc types to RX ELF reloc types. */
224
225 struct rx_reloc_map
226 {
227 bfd_reloc_code_real_type bfd_reloc_val;
228 unsigned int rx_reloc_val;
229 };
230
231 static const struct rx_reloc_map rx_reloc_map [] =
232 {
233 { BFD_RELOC_NONE, R_RX_NONE },
234 { BFD_RELOC_8, R_RX_DIR8S },
235 { BFD_RELOC_16, R_RX_DIR16S },
236 { BFD_RELOC_24, R_RX_DIR24S },
237 { BFD_RELOC_32, R_RX_DIR32 },
238 { BFD_RELOC_RX_16_OP, R_RX_DIR16 },
239 { BFD_RELOC_RX_DIR3U_PCREL, R_RX_DIR3U_PCREL },
240 { BFD_RELOC_8_PCREL, R_RX_DIR8S_PCREL },
241 { BFD_RELOC_16_PCREL, R_RX_DIR16S_PCREL },
242 { BFD_RELOC_24_PCREL, R_RX_DIR24S_PCREL },
243 { BFD_RELOC_RX_8U, R_RX_DIR8U },
244 { BFD_RELOC_RX_16U, R_RX_DIR16U },
245 { BFD_RELOC_RX_24U, R_RX_RH_24_UNS },
246 { BFD_RELOC_RX_NEG8, R_RX_RH_8_NEG },
247 { BFD_RELOC_RX_NEG16, R_RX_RH_16_NEG },
248 { BFD_RELOC_RX_NEG24, R_RX_RH_24_NEG },
249 { BFD_RELOC_RX_NEG32, R_RX_RH_32_NEG },
250 { BFD_RELOC_RX_DIFF, R_RX_RH_DIFF },
251 { BFD_RELOC_RX_GPRELB, R_RX_RH_GPRELB },
252 { BFD_RELOC_RX_GPRELW, R_RX_RH_GPRELW },
253 { BFD_RELOC_RX_GPRELL, R_RX_RH_GPRELL },
254 { BFD_RELOC_RX_RELAX, R_RX_RH_RELAX },
255 { BFD_RELOC_RX_SYM, R_RX_SYM },
256 { BFD_RELOC_RX_OP_SUBTRACT, R_RX_OPsub },
257 { BFD_RELOC_RX_OP_NEG, R_RX_OPneg },
258 { BFD_RELOC_RX_ABS8, R_RX_ABS8 },
259 { BFD_RELOC_RX_ABS16, R_RX_ABS16 },
260 { BFD_RELOC_RX_ABS16_REV, R_RX_ABS16_REV },
261 { BFD_RELOC_RX_ABS32, R_RX_ABS32 },
262 { BFD_RELOC_RX_ABS32_REV, R_RX_ABS32_REV },
263 { BFD_RELOC_RX_ABS16UL, R_RX_ABS16UL },
264 { BFD_RELOC_RX_ABS16UW, R_RX_ABS16UW },
265 { BFD_RELOC_RX_ABS16U, R_RX_ABS16U }
266 };
267
268 #define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
269
270 static reloc_howto_type *
271 rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
272 bfd_reloc_code_real_type code)
273 {
274 unsigned int i;
275
276 if (code == BFD_RELOC_RX_32_OP)
277 return rx_elf_howto_table + R_RX_DIR32;
278
279 for (i = ARRAY_SIZE (rx_reloc_map); --i;)
280 if (rx_reloc_map [i].bfd_reloc_val == code)
281 return rx_elf_howto_table + rx_reloc_map[i].rx_reloc_val;
282
283 return NULL;
284 }
285
286 static reloc_howto_type *
287 rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
288 {
289 unsigned int i;
290
291 for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
292 if (rx_elf_howto_table[i].name != NULL
293 && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
294 return rx_elf_howto_table + i;
295
296 return NULL;
297 }
298
299 /* Set the howto pointer for an RX ELF reloc. */
300
301 static void
302 rx_info_to_howto_rela (bfd * abfd ATTRIBUTE_UNUSED,
303 arelent * cache_ptr,
304 Elf_Internal_Rela * dst)
305 {
306 unsigned int r_type;
307
308 r_type = ELF32_R_TYPE (dst->r_info);
309 BFD_ASSERT (r_type < (unsigned int) R_RX_max);
310 cache_ptr->howto = rx_elf_howto_table + r_type;
311 }
312 \f
313 static bfd_vma
314 get_symbol_value (const char * name,
315 bfd_reloc_status_type * status,
316 struct bfd_link_info * info,
317 bfd * input_bfd,
318 asection * input_section,
319 int offset)
320 {
321 bfd_vma value = 0;
322 struct bfd_link_hash_entry * h;
323
324 h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);
325
326 if (h == NULL
327 || (h->type != bfd_link_hash_defined
328 && h->type != bfd_link_hash_defweak))
329 * status = info->callbacks->undefined_symbol
330 (info, name, input_bfd, input_section, offset, TRUE);
331 else
332 value = (h->u.def.value
333 + h->u.def.section->output_section->vma
334 + h->u.def.section->output_offset);
335
336 return value;
337 }
338
339 static bfd_vma
340 get_gp (bfd_reloc_status_type * status,
341 struct bfd_link_info * info,
342 bfd * abfd,
343 asection * sec,
344 int offset)
345 {
346 static bfd_boolean cached = FALSE;
347 static bfd_vma cached_value = 0;
348
349 if (!cached)
350 {
351 cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
352 cached = TRUE;
353 }
354 return cached_value;
355 }
356
357 static bfd_vma
358 get_romstart (bfd_reloc_status_type * status,
359 struct bfd_link_info * info,
360 bfd * abfd,
361 asection * sec,
362 int offset)
363 {
364 static bfd_boolean cached = FALSE;
365 static bfd_vma cached_value = 0;
366
367 if (!cached)
368 {
369 cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
370 cached = TRUE;
371 }
372 return cached_value;
373 }
374
375 static bfd_vma
376 get_ramstart (bfd_reloc_status_type * status,
377 struct bfd_link_info * info,
378 bfd * abfd,
379 asection * sec,
380 int offset)
381 {
382 static bfd_boolean cached = FALSE;
383 static bfd_vma cached_value = 0;
384
385 if (!cached)
386 {
387 cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
388 cached = TRUE;
389 }
390 return cached_value;
391 }
392
393 #define NUM_STACK_ENTRIES 16
394 static int32_t rx_stack [ NUM_STACK_ENTRIES ];
395 static unsigned int rx_stack_top;
396
397 #define RX_STACK_PUSH(val) \
398 do \
399 { \
400 if (rx_stack_top < NUM_STACK_ENTRIES) \
401 rx_stack [rx_stack_top ++] = (val); \
402 else \
403 r = bfd_reloc_dangerous; \
404 } \
405 while (0)
406
407 #define RX_STACK_POP(dest) \
408 do \
409 { \
410 if (rx_stack_top > 0) \
411 (dest) = rx_stack [-- rx_stack_top]; \
412 else \
413 (dest) = 0, r = bfd_reloc_dangerous; \
414 } \
415 while (0)
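/* The complex relocs (R_RX_SYM, R_RX_OP*, R_RX_ABS*) handled below
   drive a small RPN calculator on this stack.  For instance, a
   sequence such as

     R_RX_SYM (a), R_RX_SYM (b), R_RX_OPsub, R_RX_ABS32

   pushes both symbol values, replaces them with a - b, and finally
   pops the result into a 32-bit field.  */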
416
417 /* Relocate an RX ELF section.
418 There is some attempt to make this function usable for many architectures,
 419    both USE_REL and USE_RELA (it would be nice if such a combined critter existed),
420 if only to serve as a learning tool.
421
422 The RELOCATE_SECTION function is called by the new ELF backend linker
423 to handle the relocations for a section.
424
425 The relocs are always passed as Rela structures; if the section
426 actually uses Rel structures, the r_addend field will always be
427 zero.
428
429 This function is responsible for adjusting the section contents as
430 necessary, and (if using Rela relocs and generating a relocatable
431 output file) adjusting the reloc addend as necessary.
432
433 This function does not have to worry about setting the reloc
434 address or the reloc symbol index.
435
436 LOCAL_SYMS is a pointer to the swapped in local symbols.
437
438 LOCAL_SECTIONS is an array giving the section in the input file
439 corresponding to the st_shndx field of each local symbol.
440
441 The global hash table entry for the global symbols can be found
442 via elf_sym_hashes (input_bfd).
443
444 When generating relocatable output, this function must handle
445 STB_LOCAL/STT_SECTION symbols specially. The output symbol is
446 going to be the section symbol corresponding to the output
447 section, which means that the addend must be adjusted
448 accordingly. */
449
450 static bfd_boolean
451 rx_elf_relocate_section
452 (bfd * output_bfd,
453 struct bfd_link_info * info,
454 bfd * input_bfd,
455 asection * input_section,
456 bfd_byte * contents,
457 Elf_Internal_Rela * relocs,
458 Elf_Internal_Sym * local_syms,
459 asection ** local_sections)
460 {
461 Elf_Internal_Shdr * symtab_hdr;
462 struct elf_link_hash_entry ** sym_hashes;
463 Elf_Internal_Rela * rel;
464 Elf_Internal_Rela * relend;
465 bfd_boolean pid_mode;
466 bfd_boolean saw_subtract = FALSE;
467
468 if (elf_elfheader (output_bfd)->e_flags & E_FLAG_RX_PID)
469 pid_mode = TRUE;
470 else
471 pid_mode = FALSE;
472
473 symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
474 sym_hashes = elf_sym_hashes (input_bfd);
475 relend = relocs + input_section->reloc_count;
476 for (rel = relocs; rel < relend; rel ++)
477 {
478 reloc_howto_type * howto;
479 unsigned long r_symndx;
480 Elf_Internal_Sym * sym;
481 asection * sec;
482 struct elf_link_hash_entry * h;
483 bfd_vma relocation;
484 bfd_reloc_status_type r;
485 const char * name = NULL;
486 bfd_boolean unresolved_reloc = TRUE;
487 int r_type;
488
489 r_type = ELF32_R_TYPE (rel->r_info);
490 r_symndx = ELF32_R_SYM (rel->r_info);
491
492 howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
493 h = NULL;
494 sym = NULL;
495 sec = NULL;
496 relocation = 0;
497
498 if (rx_stack_top == 0)
499 saw_subtract = FALSE;
500
501 if (r_symndx < symtab_hdr->sh_info)
502 {
503 sym = local_syms + r_symndx;
504 sec = local_sections [r_symndx];
505 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);
506
507 name = bfd_elf_string_from_elf_section
508 (input_bfd, symtab_hdr->sh_link, sym->st_name);
509 name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
510 }
511 else
512 {
513 bfd_boolean warned, ignored;
514
515 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
516 r_symndx, symtab_hdr, sym_hashes, h,
517 sec, relocation, unresolved_reloc,
518 warned, ignored);
519
520 name = h->root.root.string;
521 }
522
523 if (sec != NULL && discarded_section (sec))
524 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
525 rel, 1, relend, howto, 0, contents);
526
527 if (info->relocatable)
528 {
529 /* This is a relocatable link. We don't have to change
530 anything, unless the reloc is against a section symbol,
531 in which case we have to adjust according to where the
532 section symbol winds up in the output section. */
533 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
534 rel->r_addend += sec->output_offset;
535 continue;
536 }
537
538 if (h != NULL && h->root.type == bfd_link_hash_undefweak)
539 /* If the symbol is undefined and weak
540 then the relocation resolves to zero. */
541 relocation = 0;
542 else
543 {
544 if (howto->pc_relative)
545 {
546 relocation -= (input_section->output_section->vma
547 + input_section->output_offset
548 + rel->r_offset);
549 if (r_type != R_RX_RH_3_PCREL
550 && r_type != R_RX_DIR3U_PCREL)
551 relocation ++;
552 }
553
554 relocation += rel->r_addend;
555 }
556
557 r = bfd_reloc_ok;
558
559 #define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
560 #define ALIGN(m) if (relocation & m) r = bfd_reloc_other;
561 #define OP(i) (contents[rel->r_offset + (i)])
562 #define WARN_REDHAT(type) \
563 _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
564 input_bfd, input_section, name)
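/* OP (i) addresses the i'th byte of the field being relocated, RANGE
   flags an overflow when the value falls outside [a, b], and ALIGN
   flags a value with any of the bits in M set (i.e. a misaligned
   address).  A little-endian 16-bit store is therefore written as
   OP (0) = relocation; OP (1) = relocation >> 8;  */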
565
566 /* Check for unsafe relocs in PID mode. These are any relocs where
567 an absolute address is being computed. There are special cases
568 for relocs against symbols that are known to be referenced in
569 crt0.o before the PID base address register has been initialised. */
570 #define UNSAFE_FOR_PID \
571 do \
572 { \
573 if (pid_mode \
574 && sec != NULL \
575 && sec->flags & SEC_READONLY \
576 && !(input_section->flags & SEC_DEBUGGING) \
577 && strcmp (name, "__pid_base") != 0 \
578 && strcmp (name, "__gp") != 0 \
579 && strcmp (name, "__romdatastart") != 0 \
580 && !saw_subtract) \
581 _bfd_error_handler (_("%B(%A): unsafe PID relocation %s at 0x%08lx (against %s in %s)"), \
582 input_bfd, input_section, howto->name, \
583 input_section->output_section->vma + input_section->output_offset + rel->r_offset, \
584 name, sec->name); \
585 } \
586 while (0)
587
588 /* Opcode relocs are always big endian. Data relocs are bi-endian. */
589 switch (r_type)
590 {
591 case R_RX_NONE:
592 break;
593
594 case R_RX_RH_RELAX:
595 break;
596
597 case R_RX_RH_3_PCREL:
598 WARN_REDHAT ("RX_RH_3_PCREL");
599 RANGE (3, 10);
600 OP (0) &= 0xf8;
601 OP (0) |= relocation & 0x07;
602 break;
603
604 case R_RX_RH_8_NEG:
605 WARN_REDHAT ("RX_RH_8_NEG");
606 relocation = - relocation;
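      /* Fall through.  */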
607 case R_RX_DIR8S_PCREL:
608 UNSAFE_FOR_PID;
609 RANGE (-128, 127);
610 OP (0) = relocation;
611 break;
612
613 case R_RX_DIR8S:
614 UNSAFE_FOR_PID;
615 RANGE (-128, 255);
616 OP (0) = relocation;
617 break;
618
619 case R_RX_DIR8U:
620 UNSAFE_FOR_PID;
621 RANGE (0, 255);
622 OP (0) = relocation;
623 break;
624
625 case R_RX_RH_16_NEG:
626 WARN_REDHAT ("RX_RH_16_NEG");
627 relocation = - relocation;
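      /* Fall through.  */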
628 case R_RX_DIR16S_PCREL:
629 UNSAFE_FOR_PID;
630 RANGE (-32768, 32767);
631 #if RX_OPCODE_BIG_ENDIAN
632 #else
633 OP (0) = relocation;
634 OP (1) = relocation >> 8;
635 #endif
636 break;
637
638 case R_RX_RH_16_OP:
639 WARN_REDHAT ("RX_RH_16_OP");
640 UNSAFE_FOR_PID;
641 RANGE (-32768, 32767);
642 #if RX_OPCODE_BIG_ENDIAN
643 OP (1) = relocation;
644 OP (0) = relocation >> 8;
645 #else
646 OP (0) = relocation;
647 OP (1) = relocation >> 8;
648 #endif
649 break;
650
651 case R_RX_DIR16S:
652 UNSAFE_FOR_PID;
653 RANGE (-32768, 65535);
654 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
655 {
656 OP (1) = relocation;
657 OP (0) = relocation >> 8;
658 }
659 else
660 {
661 OP (0) = relocation;
662 OP (1) = relocation >> 8;
663 }
664 break;
665
666 case R_RX_DIR16U:
667 UNSAFE_FOR_PID;
668 RANGE (0, 65536);
669 #if RX_OPCODE_BIG_ENDIAN
670 OP (1) = relocation;
671 OP (0) = relocation >> 8;
672 #else
673 OP (0) = relocation;
674 OP (1) = relocation >> 8;
675 #endif
676 break;
677
678 case R_RX_DIR16:
679 UNSAFE_FOR_PID;
680 RANGE (-32768, 65536);
681 #if RX_OPCODE_BIG_ENDIAN
682 OP (1) = relocation;
683 OP (0) = relocation >> 8;
684 #else
685 OP (0) = relocation;
686 OP (1) = relocation >> 8;
687 #endif
688 break;
689
690 case R_RX_DIR16_REV:
691 UNSAFE_FOR_PID;
692 RANGE (-32768, 65536);
693 #if RX_OPCODE_BIG_ENDIAN
694 OP (0) = relocation;
695 OP (1) = relocation >> 8;
696 #else
697 OP (1) = relocation;
698 OP (0) = relocation >> 8;
699 #endif
700 break;
701
702 case R_RX_DIR3U_PCREL:
703 RANGE (3, 10);
704 OP (0) &= 0xf8;
705 OP (0) |= relocation & 0x07;
706 break;
707
708 case R_RX_RH_24_NEG:
709 UNSAFE_FOR_PID;
710 WARN_REDHAT ("RX_RH_24_NEG");
711 relocation = - relocation;
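      /* Fall through.  */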
712 case R_RX_DIR24S_PCREL:
713 RANGE (-0x800000, 0x7fffff);
714 #if RX_OPCODE_BIG_ENDIAN
715 OP (2) = relocation;
716 OP (1) = relocation >> 8;
717 OP (0) = relocation >> 16;
718 #else
719 OP (0) = relocation;
720 OP (1) = relocation >> 8;
721 OP (2) = relocation >> 16;
722 #endif
723 break;
724
725 case R_RX_RH_24_OP:
726 UNSAFE_FOR_PID;
727 WARN_REDHAT ("RX_RH_24_OP");
728 RANGE (-0x800000, 0x7fffff);
729 #if RX_OPCODE_BIG_ENDIAN
730 OP (2) = relocation;
731 OP (1) = relocation >> 8;
732 OP (0) = relocation >> 16;
733 #else
734 OP (0) = relocation;
735 OP (1) = relocation >> 8;
736 OP (2) = relocation >> 16;
737 #endif
738 break;
739
740 case R_RX_DIR24S:
741 UNSAFE_FOR_PID;
742 RANGE (-0x800000, 0x7fffff);
743 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
744 {
745 OP (2) = relocation;
746 OP (1) = relocation >> 8;
747 OP (0) = relocation >> 16;
748 }
749 else
750 {
751 OP (0) = relocation;
752 OP (1) = relocation >> 8;
753 OP (2) = relocation >> 16;
754 }
755 break;
756
757 case R_RX_RH_24_UNS:
758 UNSAFE_FOR_PID;
759 WARN_REDHAT ("RX_RH_24_UNS");
760 RANGE (0, 0xffffff);
761 #if RX_OPCODE_BIG_ENDIAN
762 OP (2) = relocation;
763 OP (1) = relocation >> 8;
764 OP (0) = relocation >> 16;
765 #else
766 OP (0) = relocation;
767 OP (1) = relocation >> 8;
768 OP (2) = relocation >> 16;
769 #endif
770 break;
771
772 case R_RX_RH_32_NEG:
773 UNSAFE_FOR_PID;
774 WARN_REDHAT ("RX_RH_32_NEG");
775 relocation = - relocation;
776 #if RX_OPCODE_BIG_ENDIAN
777 OP (3) = relocation;
778 OP (2) = relocation >> 8;
779 OP (1) = relocation >> 16;
780 OP (0) = relocation >> 24;
781 #else
782 OP (0) = relocation;
783 OP (1) = relocation >> 8;
784 OP (2) = relocation >> 16;
785 OP (3) = relocation >> 24;
786 #endif
787 break;
788
789 case R_RX_RH_32_OP:
790 UNSAFE_FOR_PID;
791 WARN_REDHAT ("RX_RH_32_OP");
792 #if RX_OPCODE_BIG_ENDIAN
793 OP (3) = relocation;
794 OP (2) = relocation >> 8;
795 OP (1) = relocation >> 16;
796 OP (0) = relocation >> 24;
797 #else
798 OP (0) = relocation;
799 OP (1) = relocation >> 8;
800 OP (2) = relocation >> 16;
801 OP (3) = relocation >> 24;
802 #endif
803 break;
804
805 case R_RX_DIR32:
806 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
807 {
808 OP (3) = relocation;
809 OP (2) = relocation >> 8;
810 OP (1) = relocation >> 16;
811 OP (0) = relocation >> 24;
812 }
813 else
814 {
815 OP (0) = relocation;
816 OP (1) = relocation >> 8;
817 OP (2) = relocation >> 16;
818 OP (3) = relocation >> 24;
819 }
820 break;
821
822 case R_RX_DIR32_REV:
823 if (BIGE (output_bfd))
824 {
825 OP (0) = relocation;
826 OP (1) = relocation >> 8;
827 OP (2) = relocation >> 16;
828 OP (3) = relocation >> 24;
829 }
830 else
831 {
832 OP (3) = relocation;
833 OP (2) = relocation >> 8;
834 OP (1) = relocation >> 16;
835 OP (0) = relocation >> 24;
836 }
837 break;
838
839 case R_RX_RH_DIFF:
840 {
841 bfd_vma val;
842 WARN_REDHAT ("RX_RH_DIFF");
843 val = bfd_get_32 (output_bfd, & OP (0));
844 val -= relocation;
845 bfd_put_32 (output_bfd, val, & OP (0));
846 }
847 break;
848
849 case R_RX_RH_GPRELB:
850 WARN_REDHAT ("RX_RH_GPRELB");
851 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
852 RANGE (0, 65535);
853 #if RX_OPCODE_BIG_ENDIAN
854 OP (1) = relocation;
855 OP (0) = relocation >> 8;
856 #else
857 OP (0) = relocation;
858 OP (1) = relocation >> 8;
859 #endif
860 break;
861
862 case R_RX_RH_GPRELW:
863 WARN_REDHAT ("RX_RH_GPRELW");
864 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
865 ALIGN (1);
866 relocation >>= 1;
867 RANGE (0, 65535);
868 #if RX_OPCODE_BIG_ENDIAN
869 OP (1) = relocation;
870 OP (0) = relocation >> 8;
871 #else
872 OP (0) = relocation;
873 OP (1) = relocation >> 8;
874 #endif
875 break;
876
877 case R_RX_RH_GPRELL:
878 WARN_REDHAT ("RX_RH_GPRELL");
879 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
880 ALIGN (3);
881 relocation >>= 2;
882 RANGE (0, 65535);
883 #if RX_OPCODE_BIG_ENDIAN
884 OP (1) = relocation;
885 OP (0) = relocation >> 8;
886 #else
887 OP (0) = relocation;
888 OP (1) = relocation >> 8;
889 #endif
890 break;
891
892 /* Internal relocations just for relaxation: */
893 case R_RX_RH_ABS5p5B:
894 RX_STACK_POP (relocation);
895 RANGE (0, 31);
896 OP (0) &= 0xf8;
897 OP (0) |= relocation >> 2;
898 OP (1) &= 0x77;
899 OP (1) |= (relocation << 6) & 0x80;
900 OP (1) |= (relocation << 3) & 0x08;
901 break;
902
903 case R_RX_RH_ABS5p5W:
904 RX_STACK_POP (relocation);
905 RANGE (0, 62);
906 ALIGN (1);
907 relocation >>= 1;
908 OP (0) &= 0xf8;
909 OP (0) |= relocation >> 2;
910 OP (1) &= 0x77;
911 OP (1) |= (relocation << 6) & 0x80;
912 OP (1) |= (relocation << 3) & 0x08;
913 break;
914
915 case R_RX_RH_ABS5p5L:
916 RX_STACK_POP (relocation);
917 RANGE (0, 124);
918 ALIGN (3);
919 relocation >>= 2;
920 OP (0) &= 0xf8;
921 OP (0) |= relocation >> 2;
922 OP (1) &= 0x77;
923 OP (1) |= (relocation << 6) & 0x80;
924 OP (1) |= (relocation << 3) & 0x08;
925 break;
926
927 case R_RX_RH_ABS5p8B:
928 RX_STACK_POP (relocation);
929 RANGE (0, 31);
930 OP (0) &= 0x70;
931 OP (0) |= (relocation << 3) & 0x80;
932 OP (0) |= relocation & 0x0f;
933 break;
934
935 case R_RX_RH_ABS5p8W:
936 RX_STACK_POP (relocation);
937 RANGE (0, 62);
938 ALIGN (1);
939 relocation >>= 1;
940 OP (0) &= 0x70;
941 OP (0) |= (relocation << 3) & 0x80;
942 OP (0) |= relocation & 0x0f;
943 break;
944
945 case R_RX_RH_ABS5p8L:
946 RX_STACK_POP (relocation);
947 RANGE (0, 124);
948 ALIGN (3);
949 relocation >>= 2;
950 OP (0) &= 0x70;
951 OP (0) |= (relocation << 3) & 0x80;
952 OP (0) |= relocation & 0x0f;
953 break;
954
955 case R_RX_RH_UIMM4p8:
956 RANGE (0, 15);
957 OP (0) &= 0x0f;
958 OP (0) |= relocation << 4;
959 break;
960
961 case R_RX_RH_UNEG4p8:
962 RANGE (-15, 0);
963 OP (0) &= 0x0f;
964 OP (0) |= (-relocation) << 4;
965 break;
966
967 /* Complex reloc handling: */
968
969 case R_RX_ABS32:
970 UNSAFE_FOR_PID;
971 RX_STACK_POP (relocation);
972 #if RX_OPCODE_BIG_ENDIAN
973 OP (3) = relocation;
974 OP (2) = relocation >> 8;
975 OP (1) = relocation >> 16;
976 OP (0) = relocation >> 24;
977 #else
978 OP (0) = relocation;
979 OP (1) = relocation >> 8;
980 OP (2) = relocation >> 16;
981 OP (3) = relocation >> 24;
982 #endif
983 break;
984
985 case R_RX_ABS32_REV:
986 UNSAFE_FOR_PID;
987 RX_STACK_POP (relocation);
988 #if RX_OPCODE_BIG_ENDIAN
989 OP (0) = relocation;
990 OP (1) = relocation >> 8;
991 OP (2) = relocation >> 16;
992 OP (3) = relocation >> 24;
993 #else
994 OP (3) = relocation;
995 OP (2) = relocation >> 8;
996 OP (1) = relocation >> 16;
997 OP (0) = relocation >> 24;
998 #endif
999 break;
1000
1001 case R_RX_ABS24S_PCREL:
1002 case R_RX_ABS24S:
1003 UNSAFE_FOR_PID;
1004 RX_STACK_POP (relocation);
1005 RANGE (-0x800000, 0x7fffff);
1006 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
1007 {
1008 OP (2) = relocation;
1009 OP (1) = relocation >> 8;
1010 OP (0) = relocation >> 16;
1011 }
1012 else
1013 {
1014 OP (0) = relocation;
1015 OP (1) = relocation >> 8;
1016 OP (2) = relocation >> 16;
1017 }
1018 break;
1019
1020 case R_RX_ABS16:
1021 UNSAFE_FOR_PID;
1022 RX_STACK_POP (relocation);
1023 RANGE (-32768, 65535);
1024 #if RX_OPCODE_BIG_ENDIAN
1025 OP (1) = relocation;
1026 OP (0) = relocation >> 8;
1027 #else
1028 OP (0) = relocation;
1029 OP (1) = relocation >> 8;
1030 #endif
1031 break;
1032
1033 case R_RX_ABS16_REV:
1034 UNSAFE_FOR_PID;
1035 RX_STACK_POP (relocation);
1036 RANGE (-32768, 65535);
1037 #if RX_OPCODE_BIG_ENDIAN
1038 OP (0) = relocation;
1039 OP (1) = relocation >> 8;
1040 #else
1041 OP (1) = relocation;
1042 OP (0) = relocation >> 8;
1043 #endif
1044 break;
1045
1046 case R_RX_ABS16S_PCREL:
1047 case R_RX_ABS16S:
1048 RX_STACK_POP (relocation);
1049 RANGE (-32768, 32767);
1050 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
1051 {
1052 OP (1) = relocation;
1053 OP (0) = relocation >> 8;
1054 }
1055 else
1056 {
1057 OP (0) = relocation;
1058 OP (1) = relocation >> 8;
1059 }
1060 break;
1061
1062 case R_RX_ABS16U:
1063 UNSAFE_FOR_PID;
1064 RX_STACK_POP (relocation);
1065 RANGE (0, 65536);
1066 #if RX_OPCODE_BIG_ENDIAN
1067 OP (1) = relocation;
1068 OP (0) = relocation >> 8;
1069 #else
1070 OP (0) = relocation;
1071 OP (1) = relocation >> 8;
1072 #endif
1073 break;
1074
1075 case R_RX_ABS16UL:
1076 UNSAFE_FOR_PID;
1077 RX_STACK_POP (relocation);
1078 relocation >>= 2;
1079 RANGE (0, 65536);
1080 #if RX_OPCODE_BIG_ENDIAN
1081 OP (1) = relocation;
1082 OP (0) = relocation >> 8;
1083 #else
1084 OP (0) = relocation;
1085 OP (1) = relocation >> 8;
1086 #endif
1087 break;
1088
1089 case R_RX_ABS16UW:
1090 UNSAFE_FOR_PID;
1091 RX_STACK_POP (relocation);
1092 relocation >>= 1;
1093 RANGE (0, 65536);
1094 #if RX_OPCODE_BIG_ENDIAN
1095 OP (1) = relocation;
1096 OP (0) = relocation >> 8;
1097 #else
1098 OP (0) = relocation;
1099 OP (1) = relocation >> 8;
1100 #endif
1101 break;
1102
1103 case R_RX_ABS8:
1104 UNSAFE_FOR_PID;
1105 RX_STACK_POP (relocation);
1106 RANGE (-128, 255);
1107 OP (0) = relocation;
1108 break;
1109
1110 case R_RX_ABS8U:
1111 UNSAFE_FOR_PID;
1112 RX_STACK_POP (relocation);
1113 RANGE (0, 255);
1114 OP (0) = relocation;
1115 break;
1116
1117 case R_RX_ABS8UL:
1118 UNSAFE_FOR_PID;
1119 RX_STACK_POP (relocation);
1120 relocation >>= 2;
1121 RANGE (0, 255);
1122 OP (0) = relocation;
1123 break;
1124
1125 case R_RX_ABS8UW:
1126 UNSAFE_FOR_PID;
1127 RX_STACK_POP (relocation);
1128 relocation >>= 1;
1129 RANGE (0, 255);
1130 OP (0) = relocation;
1131 break;
1132
1133 case R_RX_ABS8S:
1134 UNSAFE_FOR_PID;
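      /* Fall through.  */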
1135 case R_RX_ABS8S_PCREL:
1136 RX_STACK_POP (relocation);
1137 RANGE (-128, 127);
1138 OP (0) = relocation;
1139 break;
1140
1141 case R_RX_SYM:
1142 if (r_symndx < symtab_hdr->sh_info)
1143 RX_STACK_PUSH (sec->output_section->vma
1144 + sec->output_offset
1145 + sym->st_value
1146 + rel->r_addend);
1147 else
1148 {
1149 if (h != NULL
1150 && (h->root.type == bfd_link_hash_defined
1151 || h->root.type == bfd_link_hash_defweak))
1152 RX_STACK_PUSH (h->root.u.def.value
1153 + sec->output_section->vma
1154 + sec->output_offset
1155 + rel->r_addend);
1156 else
1157 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1158 }
1159 break;
1160
1161 case R_RX_OPneg:
1162 {
1163 int32_t tmp;
1164
1165 saw_subtract = TRUE;
1166 RX_STACK_POP (tmp);
1167 tmp = - tmp;
1168 RX_STACK_PUSH (tmp);
1169 }
1170 break;
1171
1172 case R_RX_OPadd:
1173 {
1174 int32_t tmp1, tmp2;
1175
1176 RX_STACK_POP (tmp1);
1177 RX_STACK_POP (tmp2);
1178 tmp1 += tmp2;
1179 RX_STACK_PUSH (tmp1);
1180 }
1181 break;
1182
1183 case R_RX_OPsub:
1184 {
1185 int32_t tmp1, tmp2;
1186
1187 saw_subtract = TRUE;
1188 RX_STACK_POP (tmp1);
1189 RX_STACK_POP (tmp2);
1190 tmp2 -= tmp1;
1191 RX_STACK_PUSH (tmp2);
1192 }
1193 break;
1194
1195 case R_RX_OPmul:
1196 {
1197 int32_t tmp1, tmp2;
1198
1199 RX_STACK_POP (tmp1);
1200 RX_STACK_POP (tmp2);
1201 tmp1 *= tmp2;
1202 RX_STACK_PUSH (tmp1);
1203 }
1204 break;
1205
1206 case R_RX_OPdiv:
1207 {
1208 int32_t tmp1, tmp2;
1209
1210 RX_STACK_POP (tmp1);
1211 RX_STACK_POP (tmp2);
1212 tmp1 /= tmp2;
1213 RX_STACK_PUSH (tmp1);
1214 }
1215 break;
1216
1217 case R_RX_OPshla:
1218 {
1219 int32_t tmp1, tmp2;
1220
1221 RX_STACK_POP (tmp1);
1222 RX_STACK_POP (tmp2);
1223 tmp1 <<= tmp2;
1224 RX_STACK_PUSH (tmp1);
1225 }
1226 break;
1227
1228 case R_RX_OPshra:
1229 {
1230 int32_t tmp1, tmp2;
1231
1232 RX_STACK_POP (tmp1);
1233 RX_STACK_POP (tmp2);
1234 tmp1 >>= tmp2;
1235 RX_STACK_PUSH (tmp1);
1236 }
1237 break;
1238
1239 case R_RX_OPsctsize:
1240 RX_STACK_PUSH (input_section->size);
1241 break;
1242
1243 case R_RX_OPscttop:
1244 RX_STACK_PUSH (input_section->output_section->vma);
1245 break;
1246
1247 case R_RX_OPand:
1248 {
1249 int32_t tmp1, tmp2;
1250
1251 RX_STACK_POP (tmp1);
1252 RX_STACK_POP (tmp2);
1253 tmp1 &= tmp2;
1254 RX_STACK_PUSH (tmp1);
1255 }
1256 break;
1257
1258 case R_RX_OPor:
1259 {
1260 int32_t tmp1, tmp2;
1261
1262 RX_STACK_POP (tmp1);
1263 RX_STACK_POP (tmp2);
1264 tmp1 |= tmp2;
1265 RX_STACK_PUSH (tmp1);
1266 }
1267 break;
1268
1269 case R_RX_OPxor:
1270 {
1271 int32_t tmp1, tmp2;
1272
1273 RX_STACK_POP (tmp1);
1274 RX_STACK_POP (tmp2);
1275 tmp1 ^= tmp2;
1276 RX_STACK_PUSH (tmp1);
1277 }
1278 break;
1279
1280 case R_RX_OPnot:
1281 {
1282 int32_t tmp;
1283
1284 RX_STACK_POP (tmp);
1285 tmp = ~ tmp;
1286 RX_STACK_PUSH (tmp);
1287 }
1288 break;
1289
1290 case R_RX_OPmod:
1291 {
1292 int32_t tmp1, tmp2;
1293
1294 RX_STACK_POP (tmp1);
1295 RX_STACK_POP (tmp2);
1296 tmp1 %= tmp2;
1297 RX_STACK_PUSH (tmp1);
1298 }
1299 break;
1300
1301 case R_RX_OPromtop:
1302 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1303 break;
1304
1305 case R_RX_OPramtop:
1306 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1307 break;
1308
1309 default:
1310 r = bfd_reloc_notsupported;
1311 break;
1312 }
1313
1314 if (r != bfd_reloc_ok)
1315 {
1316 const char * msg = NULL;
1317
1318 switch (r)
1319 {
1320 case bfd_reloc_overflow:
1321 /* Catch the case of a missing function declaration
1322 and emit a more helpful error message. */
1323 if (r_type == R_RX_DIR24S_PCREL)
1324 msg = _("%B(%A): error: call to undefined function '%s'");
1325 else
1326 r = info->callbacks->reloc_overflow
1327 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
1328 input_bfd, input_section, rel->r_offset);
1329 break;
1330
1331 case bfd_reloc_undefined:
1332 r = info->callbacks->undefined_symbol
1333 (info, name, input_bfd, input_section, rel->r_offset,
1334 TRUE);
1335 break;
1336
1337 case bfd_reloc_other:
1338 msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1339 break;
1340
1341 case bfd_reloc_outofrange:
1342 msg = _("%B(%A): internal error: out of range error");
1343 break;
1344
1345 case bfd_reloc_notsupported:
1346 msg = _("%B(%A): internal error: unsupported relocation error");
1347 break;
1348
1349 case bfd_reloc_dangerous:
1350 msg = _("%B(%A): internal error: dangerous relocation");
1351 break;
1352
1353 default:
1354 msg = _("%B(%A): internal error: unknown error");
1355 break;
1356 }
1357
1358 if (msg)
1359 _bfd_error_handler (msg, input_bfd, input_section, name);
1360
1361 if (! r)
1362 return FALSE;
1363 }
1364 }
1365
1366 return TRUE;
1367 }
1368 \f
1369 /* Relaxation Support. */
1370
1371 /* Progression of relocations from largest operand size to smallest
1372 operand size. */
1373
1374 static int
1375 next_smaller_reloc (int r)
1376 {
1377 switch (r)
1378 {
1379 case R_RX_DIR32: return R_RX_DIR24S;
1380 case R_RX_DIR24S: return R_RX_DIR16S;
1381 case R_RX_DIR16S: return R_RX_DIR8S;
1382 case R_RX_DIR8S: return R_RX_NONE;
1383
1384 case R_RX_DIR16: return R_RX_DIR8;
1385 case R_RX_DIR8: return R_RX_NONE;
1386
1387 case R_RX_DIR16U: return R_RX_DIR8U;
1388 case R_RX_DIR8U: return R_RX_NONE;
1389
1390 case R_RX_DIR24S_PCREL: return R_RX_DIR16S_PCREL;
1391 case R_RX_DIR16S_PCREL: return R_RX_DIR8S_PCREL;
1392 case R_RX_DIR8S_PCREL: return R_RX_DIR3U_PCREL;
1393
1394 case R_RX_DIR16UL: return R_RX_DIR8UL;
1395 case R_RX_DIR8UL: return R_RX_NONE;
1396 case R_RX_DIR16UW: return R_RX_DIR8UW;
1397 case R_RX_DIR8UW: return R_RX_NONE;
1398
1399 case R_RX_RH_32_OP: return R_RX_RH_24_OP;
1400 case R_RX_RH_24_OP: return R_RX_RH_16_OP;
1401 case R_RX_RH_16_OP: return R_RX_DIR8;
1402
1403 case R_RX_ABS32: return R_RX_ABS24S;
1404 case R_RX_ABS24S: return R_RX_ABS16S;
1405 case R_RX_ABS16: return R_RX_ABS8;
1406 case R_RX_ABS16U: return R_RX_ABS8U;
1407 case R_RX_ABS16S: return R_RX_ABS8S;
1408 case R_RX_ABS8: return R_RX_NONE;
1409 case R_RX_ABS8U: return R_RX_NONE;
1410 case R_RX_ABS8S: return R_RX_NONE;
1411 case R_RX_ABS24S_PCREL: return R_RX_ABS16S_PCREL;
1412 case R_RX_ABS16S_PCREL: return R_RX_ABS8S_PCREL;
1413 case R_RX_ABS8S_PCREL: return R_RX_NONE;
1414 case R_RX_ABS16UL: return R_RX_ABS8UL;
1415 case R_RX_ABS16UW: return R_RX_ABS8UW;
1416 case R_RX_ABS8UL: return R_RX_NONE;
1417 case R_RX_ABS8UW: return R_RX_NONE;
1418 }
1419 return r;
1420 };
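/* So, for example, an operand that starts life as R_RX_DIR32 is
   stepped down through R_RX_DIR24S, R_RX_DIR16S and R_RX_DIR8S, one
   byte at a time, for as long as the target value still fits in the
   smaller field.  */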
1421
1422 /* Delete some bytes from a section while relaxing. */
1423
1424 static bfd_boolean
1425 elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
1426 Elf_Internal_Rela *alignment_rel, int force_snip)
1427 {
1428 Elf_Internal_Shdr * symtab_hdr;
1429 unsigned int sec_shndx;
1430 bfd_byte * contents;
1431 Elf_Internal_Rela * irel;
1432 Elf_Internal_Rela * irelend;
1433 Elf_Internal_Sym * isym;
1434 Elf_Internal_Sym * isymend;
1435 bfd_vma toaddr;
1436 unsigned int symcount;
1437 struct elf_link_hash_entry ** sym_hashes;
1438 struct elf_link_hash_entry ** end_hashes;
1439
1440 if (!alignment_rel)
1441 force_snip = 1;
1442
1443 sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
1444
1445 contents = elf_section_data (sec)->this_hdr.contents;
1446
1447 /* The deletion must stop at the next alignment boundary, if
1448 ALIGNMENT_REL is non-NULL. */
1449 toaddr = sec->size;
1450 if (alignment_rel)
1451 toaddr = alignment_rel->r_offset;
1452
1453 irel = elf_section_data (sec)->relocs;
1454 irelend = irel + sec->reloc_count;
1455
1456 /* Actually delete the bytes. */
1457 memmove (contents + addr, contents + addr + count,
1458 (size_t) (toaddr - addr - count));
1459
1460 /* If we don't have an alignment marker to worry about, we can just
1461 shrink the section. Otherwise, we have to fill in the newly
1462 created gap with NOP insns (0x03). */
1463 if (force_snip)
1464 sec->size -= count;
1465 else
1466 memset (contents + toaddr - count, 0x03, count);
1467
1468 /* Adjust all the relocs. */
1469 for (irel = elf_section_data (sec)->relocs; irel < irelend; irel++)
1470 {
1471 /* Get the new reloc address. */
1472 if (irel->r_offset > addr
1473 && (irel->r_offset < toaddr
1474 || (force_snip && irel->r_offset == toaddr)))
1475 irel->r_offset -= count;
1476
1477 /* If we see an ALIGN marker at the end of the gap, we move it
1478 to the beginning of the gap, since marking these gaps is what
1479 they're for. */
1480 if (irel->r_offset == toaddr
1481 && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1482 && irel->r_addend & RX_RELAXA_ALIGN)
1483 irel->r_offset -= count;
1484 }
1485
1486 /* Adjust the local symbols defined in this section. */
1487 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1488 isym = (Elf_Internal_Sym *) symtab_hdr->contents;
1489 isymend = isym + symtab_hdr->sh_info;
1490
1491 for (; isym < isymend; isym++)
1492 {
1493 /* If the symbol is in the range of memory we just moved, we
1494 have to adjust its value. */
1495 if (isym->st_shndx == sec_shndx
1496 && isym->st_value > addr
1497 && isym->st_value < toaddr)
1498 isym->st_value -= count;
1499
1500       /* If the symbol *spans* the bytes we just deleted (i.e. its
1501          *end* is in the moved bytes but its *start* isn't), then we
1502 must adjust its size. */
1503 if (isym->st_shndx == sec_shndx
1504 && isym->st_value < addr
1505 && isym->st_value + isym->st_size > addr
1506 && isym->st_value + isym->st_size < toaddr)
1507 isym->st_size -= count;
1508 }
1509
1510 /* Now adjust the global symbols defined in this section. */
1511 symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
1512 - symtab_hdr->sh_info);
1513 sym_hashes = elf_sym_hashes (abfd);
1514 end_hashes = sym_hashes + symcount;
1515
1516 for (; sym_hashes < end_hashes; sym_hashes++)
1517 {
1518 struct elf_link_hash_entry *sym_hash = *sym_hashes;
1519
1520 if ((sym_hash->root.type == bfd_link_hash_defined
1521 || sym_hash->root.type == bfd_link_hash_defweak)
1522 && sym_hash->root.u.def.section == sec)
1523 {
1524 /* As above, adjust the value if needed. */
1525 if (sym_hash->root.u.def.value > addr
1526 && sym_hash->root.u.def.value < toaddr)
1527 sym_hash->root.u.def.value -= count;
1528
1529 /* As above, adjust the size if needed. */
1530 if (sym_hash->root.u.def.value < addr
1531 && sym_hash->root.u.def.value + sym_hash->size > addr
1532 && sym_hash->root.u.def.value + sym_hash->size < toaddr)
1533 sym_hash->size -= count;
1534 }
1535 }
1536
1537 return TRUE;
1538 }
1539
1540 /* Used to sort relocs by address. If relocs have the same address,
1541 we maintain their relative order, except that R_RX_RH_RELAX
1542 alignment relocs must be the first reloc for any given address. */
1543
1544 static void
1545 reloc_bubblesort (Elf_Internal_Rela * r, int count)
1546 {
1547 int i;
1548 bfd_boolean again;
1549 bfd_boolean swappit;
1550
1551 /* This is almost a classic bubblesort. It's the slowest sort, but
1552 we're taking advantage of the fact that the relocations are
1553 mostly in order already (the assembler emits them that way) and
1554 we need relocs with the same address to remain in the same
1555 relative order. */
1556 again = TRUE;
1557 while (again)
1558 {
1559 again = FALSE;
1560 for (i = 0; i < count - 1; i ++)
1561 {
1562 if (r[i].r_offset > r[i + 1].r_offset)
1563 swappit = TRUE;
1564 else if (r[i].r_offset < r[i + 1].r_offset)
1565 swappit = FALSE;
1566 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1567 && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
1568 swappit = TRUE;
1569 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1570 && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
1571 && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
1572 && (r[i].r_addend & RX_RELAXA_ALIGN)))
1573 swappit = TRUE;
1574 else
1575 swappit = FALSE;
1576
1577 if (swappit)
1578 {
1579 Elf_Internal_Rela tmp;
1580
1581 tmp = r[i];
1582 r[i] = r[i + 1];
1583 r[i + 1] = tmp;
1584 /* If we do move a reloc back, re-scan to see if it
1585 needs to be moved even further back. This avoids
1586 most of the O(n^2) behavior for our cases. */
1587 if (i > 0)
1588 i -= 2;
1589 again = TRUE;
1590 }
1591 }
1592 }
1593 }
1594
1595
1596 #define OFFSET_FOR_RELOC(rel, lrel, scale) \
1597 rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
1598 lrel, abfd, sec, link_info, scale)
1599
1600 static bfd_vma
1601 rx_offset_for_reloc (bfd * abfd,
1602 Elf_Internal_Rela * rel,
1603 Elf_Internal_Shdr * symtab_hdr,
1604 Elf_External_Sym_Shndx * shndx_buf ATTRIBUTE_UNUSED,
1605 Elf_Internal_Sym * intsyms,
1606 Elf_Internal_Rela ** lrel,
1607 bfd * input_bfd,
1608 asection * input_section,
1609 struct bfd_link_info * info,
1610 int * scale)
1611 {
1612 bfd_vma symval;
1613 bfd_reloc_status_type r;
1614
1615 *scale = 1;
1616
1617 /* REL is the first of 1..N relocations. We compute the symbol
1618 value for each relocation, then combine them if needed. LREL
1619 gets a pointer to the last relocation used. */
1620 while (1)
1621 {
1622 int32_t tmp1, tmp2;
1623
1624 /* Get the value of the symbol referred to by the reloc. */
1625 if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
1626 {
1627 /* A local symbol. */
1628 Elf_Internal_Sym *isym;
1629 asection *ssec;
1630
1631 isym = intsyms + ELF32_R_SYM (rel->r_info);
1632
1633 if (isym->st_shndx == SHN_UNDEF)
1634 ssec = bfd_und_section_ptr;
1635 else if (isym->st_shndx == SHN_ABS)
1636 ssec = bfd_abs_section_ptr;
1637 else if (isym->st_shndx == SHN_COMMON)
1638 ssec = bfd_com_section_ptr;
1639 else
1640 ssec = bfd_section_from_elf_index (abfd,
1641 isym->st_shndx);
1642
1643 /* Initial symbol value. */
1644 symval = isym->st_value;
1645
1646 /* GAS may have made this symbol relative to a section, in
1647 which case, we have to add the addend to find the
1648 symbol. */
1649 if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
1650 symval += rel->r_addend;
1651
1652 if (ssec)
1653 {
1654 if ((ssec->flags & SEC_MERGE)
1655 && ssec->sec_info_type == SEC_INFO_TYPE_MERGE)
1656 symval = _bfd_merged_section_offset (abfd, & ssec,
1657 elf_section_data (ssec)->sec_info,
1658 symval);
1659 }
1660
1661 /* Now make the offset relative to where the linker is putting it. */
1662 if (ssec)
1663 symval +=
1664 ssec->output_section->vma + ssec->output_offset;
1665
1666 symval += rel->r_addend;
1667 }
1668 else
1669 {
1670 unsigned long indx;
1671 struct elf_link_hash_entry * h;
1672
1673 /* An external symbol. */
1674 indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
1675 h = elf_sym_hashes (abfd)[indx];
1676 BFD_ASSERT (h != NULL);
1677
1678 if (h->root.type != bfd_link_hash_defined
1679 && h->root.type != bfd_link_hash_defweak)
1680 {
1681 /* This appears to be a reference to an undefined
1682 symbol. Just ignore it--it will be caught by the
1683 regular reloc processing. */
1684 if (lrel)
1685 *lrel = rel;
1686 return 0;
1687 }
1688
1689 symval = (h->root.u.def.value
1690 + h->root.u.def.section->output_section->vma
1691 + h->root.u.def.section->output_offset);
1692
1693 symval += rel->r_addend;
1694 }
1695
1696 switch (ELF32_R_TYPE (rel->r_info))
1697 {
1698 case R_RX_SYM:
1699 RX_STACK_PUSH (symval);
1700 break;
1701
1702 case R_RX_OPneg:
1703 RX_STACK_POP (tmp1);
1704 tmp1 = - tmp1;
1705 RX_STACK_PUSH (tmp1);
1706 break;
1707
1708 case R_RX_OPadd:
1709 RX_STACK_POP (tmp1);
1710 RX_STACK_POP (tmp2);
1711 tmp1 += tmp2;
1712 RX_STACK_PUSH (tmp1);
1713 break;
1714
1715 case R_RX_OPsub:
1716 RX_STACK_POP (tmp1);
1717 RX_STACK_POP (tmp2);
1718 tmp2 -= tmp1;
1719 RX_STACK_PUSH (tmp2);
1720 break;
1721
1722 case R_RX_OPmul:
1723 RX_STACK_POP (tmp1);
1724 RX_STACK_POP (tmp2);
1725 tmp1 *= tmp2;
1726 RX_STACK_PUSH (tmp1);
1727 break;
1728
1729 case R_RX_OPdiv:
1730 RX_STACK_POP (tmp1);
1731 RX_STACK_POP (tmp2);
1732 tmp1 /= tmp2;
1733 RX_STACK_PUSH (tmp1);
1734 break;
1735
1736 case R_RX_OPshla:
1737 RX_STACK_POP (tmp1);
1738 RX_STACK_POP (tmp2);
1739 tmp1 <<= tmp2;
1740 RX_STACK_PUSH (tmp1);
1741 break;
1742
1743 case R_RX_OPshra:
1744 RX_STACK_POP (tmp1);
1745 RX_STACK_POP (tmp2);
1746 tmp1 >>= tmp2;
1747 RX_STACK_PUSH (tmp1);
1748 break;
1749
1750 case R_RX_OPsctsize:
1751 RX_STACK_PUSH (input_section->size);
1752 break;
1753
1754 case R_RX_OPscttop:
1755 RX_STACK_PUSH (input_section->output_section->vma);
1756 break;
1757
1758 case R_RX_OPand:
1759 RX_STACK_POP (tmp1);
1760 RX_STACK_POP (tmp2);
1761 tmp1 &= tmp2;
1762 RX_STACK_PUSH (tmp1);
1763 break;
1764
1765 case R_RX_OPor:
1766 RX_STACK_POP (tmp1);
1767 RX_STACK_POP (tmp2);
1768 tmp1 |= tmp2;
1769 RX_STACK_PUSH (tmp1);
1770 break;
1771
1772 case R_RX_OPxor:
1773 RX_STACK_POP (tmp1);
1774 RX_STACK_POP (tmp2);
1775 tmp1 ^= tmp2;
1776 RX_STACK_PUSH (tmp1);
1777 break;
1778
1779 case R_RX_OPnot:
1780 RX_STACK_POP (tmp1);
1781 tmp1 = ~ tmp1;
1782 RX_STACK_PUSH (tmp1);
1783 break;
1784
1785 case R_RX_OPmod:
1786 RX_STACK_POP (tmp1);
1787 RX_STACK_POP (tmp2);
1788 tmp1 %= tmp2;
1789 RX_STACK_PUSH (tmp1);
1790 break;
1791
1792 case R_RX_OPromtop:
1793 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1794 break;
1795
1796 case R_RX_OPramtop:
1797 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1798 break;
1799
1800 case R_RX_DIR16UL:
1801 case R_RX_DIR8UL:
1802 case R_RX_ABS16UL:
1803 case R_RX_ABS8UL:
1804 if (rx_stack_top)
1805 RX_STACK_POP (symval);
1806 if (lrel)
1807 *lrel = rel;
1808 *scale = 4;
1809 return symval;
1810
1811 case R_RX_DIR16UW:
1812 case R_RX_DIR8UW:
1813 case R_RX_ABS16UW:
1814 case R_RX_ABS8UW:
1815 if (rx_stack_top)
1816 RX_STACK_POP (symval);
1817 if (lrel)
1818 *lrel = rel;
1819 *scale = 2;
1820 return symval;
1821
1822 default:
1823 if (rx_stack_top)
1824 RX_STACK_POP (symval);
1825 if (lrel)
1826 *lrel = rel;
1827 return symval;
1828 }
1829
1830 rel ++;
1831 }
1832 }
1833
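/* When an operand is snipped, any other relocs that share its old
   offset (the trailing entries of a complex-reloc chain, for example)
   must be moved by the same amount.  Walk the relocs after IREL up to
   and including SREL and shift those that match SREL's old offset by
   DELTA.  */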
1834 static void
1835 move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
1836 {
1837 bfd_vma old_offset = srel->r_offset;
1838
1839 irel ++;
1840 while (irel <= srel)
1841 {
1842 if (irel->r_offset == old_offset)
1843 irel->r_offset += delta;
1844 irel ++;
1845 }
1846 }
1847
1848 /* Relax one section. */
1849
1850 static bfd_boolean
1851 elf32_rx_relax_section (bfd * abfd,
1852 asection * sec,
1853 struct bfd_link_info * link_info,
1854 bfd_boolean * again,
1855 bfd_boolean allow_pcrel3)
1856 {
1857 Elf_Internal_Shdr * symtab_hdr;
1858 Elf_Internal_Shdr * shndx_hdr;
1859 Elf_Internal_Rela * internal_relocs;
1860 Elf_Internal_Rela * free_relocs = NULL;
1861 Elf_Internal_Rela * irel;
1862 Elf_Internal_Rela * srel;
1863 Elf_Internal_Rela * irelend;
1864 Elf_Internal_Rela * next_alignment;
1865 Elf_Internal_Rela * prev_alignment;
1866 bfd_byte * contents = NULL;
1867 bfd_byte * free_contents = NULL;
1868 Elf_Internal_Sym * intsyms = NULL;
1869 Elf_Internal_Sym * free_intsyms = NULL;
1870 Elf_External_Sym_Shndx * shndx_buf = NULL;
1871 bfd_vma pc;
1872 bfd_vma sec_start;
1873 bfd_vma symval = 0;
1874 int pcrel = 0;
1875 int code = 0;
1876 int section_alignment_glue;
1877 /* how much to scale the relocation by - 1, 2, or 4. */
1878 int scale;
1879
1880 /* Assume nothing changes. */
1881 *again = FALSE;
1882
1883 /* We don't have to do anything for a relocatable link, if
1884 this section does not have relocs, or if this is not a
1885 code section. */
1886 if (link_info->relocatable
1887 || (sec->flags & SEC_RELOC) == 0
1888 || sec->reloc_count == 0
1889 || (sec->flags & SEC_CODE) == 0)
1890 return TRUE;
1891
1892 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1893 shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr;
1894
1895 sec_start = sec->output_section->vma + sec->output_offset;
1896
1897 /* Get the section contents. */
1898 if (elf_section_data (sec)->this_hdr.contents != NULL)
1899 contents = elf_section_data (sec)->this_hdr.contents;
1900 /* Go get them off disk. */
1901 else
1902 {
1903 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
1904 goto error_return;
1905 elf_section_data (sec)->this_hdr.contents = contents;
1906 }
1907
1908 /* Read this BFD's symbols. */
1909 /* Get cached copy if it exists. */
1910 if (symtab_hdr->contents != NULL)
1911 intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
1912 else
1913 {
1914 intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
1915 symtab_hdr->contents = (bfd_byte *) intsyms;
1916 }
1917
1918 if (shndx_hdr->sh_size != 0)
1919 {
1920 bfd_size_type amt;
1921
1922 amt = symtab_hdr->sh_info;
1923 amt *= sizeof (Elf_External_Sym_Shndx);
1924 shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (amt);
1925 if (shndx_buf == NULL)
1926 goto error_return;
1927 if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0
1928 || bfd_bread (shndx_buf, amt, abfd) != amt)
1929 goto error_return;
1930 shndx_hdr->contents = (bfd_byte *) shndx_buf;
1931 }
1932
1933 /* Get a copy of the native relocations. */
1934 internal_relocs = (_bfd_elf_link_read_relocs
1935 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
1936 link_info->keep_memory));
1937 if (internal_relocs == NULL)
1938 goto error_return;
1939 if (! link_info->keep_memory)
1940 free_relocs = internal_relocs;
1941
1942 /* The RL_ relocs must be just before the operand relocs they go
1943 with, so we must sort them to guarantee this. We use bubblesort
1944 instead of qsort so we can guarantee that relocs with the same
1945 address remain in the same relative order. */
1946 reloc_bubblesort (internal_relocs, sec->reloc_count);
1947
1948 /* Walk through them looking for relaxing opportunities. */
1949 irelend = internal_relocs + sec->reloc_count;
1950
1951 /* This will either be NULL or a pointer to the next alignment
1952 relocation. */
1953 next_alignment = internal_relocs;
1954 /* This will be the previous alignment, although at first it points
1955 to the first real relocation. */
1956 prev_alignment = internal_relocs;
1957
1958 /* We calculate worst case shrinkage caused by alignment directives.
1959      Not fool-proof, but better than either ignoring the problem or
1960 doing heavy duty analysis of all the alignment markers in all
1961 input sections. */
1962 section_alignment_glue = 0;
1963 for (irel = internal_relocs; irel < irelend; irel++)
1964 if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1965 && irel->r_addend & RX_RELAXA_ALIGN)
1966 {
1967 int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1968
1969 if (section_alignment_glue < this_glue)
1970 section_alignment_glue = this_glue;
1971 }
1972 /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
1973 shrinkage. */
1974 section_alignment_glue *= 2;
1975
1976 for (irel = internal_relocs; irel < irelend; irel++)
1977 {
1978 unsigned char *insn;
1979 int nrelocs;
1980
1981 /* The insns we care about are all marked with one of these. */
1982 if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
1983 continue;
1984
1985 if (irel->r_addend & RX_RELAXA_ALIGN
1986 || next_alignment == internal_relocs)
1987 {
1988 /* When we delete bytes, we need to maintain all the alignments
1989 indicated. In addition, we need to be careful about relaxing
1990 jumps across alignment boundaries - these displacements
1991 *grow* when we delete bytes. For now, don't shrink
1992 displacements across an alignment boundary, just in case.
1993 Note that this only affects relocations to the same
1994 section. */
1995 prev_alignment = next_alignment;
1996 next_alignment += 2;
1997 while (next_alignment < irelend
1998 && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
1999 || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
2000 next_alignment ++;
2001 if (next_alignment >= irelend || next_alignment->r_offset == 0)
2002 next_alignment = NULL;
2003 }
2004
2005 /* When we hit alignment markers, see if we've shrunk enough
2006 before them to reduce the gap without violating the alignment
2007 requirements. */
2008 if (irel->r_addend & RX_RELAXA_ALIGN)
2009 {
2010 /* At this point, the next relocation *should* be the ELIGN
2011 end marker. */
2012 Elf_Internal_Rela *erel = irel + 1;
2013 unsigned int alignment, nbytes;
2014
2015 if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
2016 continue;
2017 if (!(erel->r_addend & RX_RELAXA_ELIGN))
2018 continue;
2019
2020 alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);
2021
2022 if (erel->r_offset - irel->r_offset < alignment)
2023 continue;
2024
2025 nbytes = erel->r_offset - irel->r_offset;
2026 nbytes /= alignment;
2027 nbytes *= alignment;
2028
2029 elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
2030 erel->r_offset == sec->size);
2031 *again = TRUE;
2032
2033 continue;
2034 }
2035
2036 if (irel->r_addend & RX_RELAXA_ELIGN)
2037 continue;
2038
2039 insn = contents + irel->r_offset;
2040
2041 nrelocs = irel->r_addend & RX_RELAXA_RNUM;
2042
2043 /* At this point, we have an insn that is a candidate for linker
2044 relaxation. There are NRELOCS relocs following that may be
2045 relaxed, although each reloc may be made of more than one
2046 reloc entry (such as gp-rel symbols). */
2047
2048 /* Get the value of the symbol referred to by the reloc. Just
2049 in case this is the last reloc in the list, use the RL's
2050 addend to choose between this reloc (no addend) or the next
2051 (yes addend, which means at least one following reloc). */
2052
2053       /* srel points to the "current" relocation for this insn -
2054 actually the last reloc for a given operand, which is the one
2055 we need to update. We check the relaxations in the same
2056 order that the relocations happen, so we'll just push it
2057 along as we go. */
2058 srel = irel;
2059
2060 pc = sec->output_section->vma + sec->output_offset
2061 + srel->r_offset;
2062
2063 #define GET_RELOC \
2064 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
2065 pcrel = symval - pc + srel->r_addend; \
2066 nrelocs --;
2067
2068 #define SNIPNR(offset, nbytes) \
2069 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
2070 #define SNIP(offset, nbytes, newtype) \
2071 SNIPNR (offset, nbytes); \
2072 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
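/* GET_RELOC evaluates the next operand reloc into SYMVAL (leaving
   SREL at the last reloc entry consumed) and computes its PC-relative
   distance.  SNIPNR removes NBYTES at OFFSET within the current insn;
   SNIP does the same and also retypes SREL to NEWTYPE to match the
   shrunken operand.  */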
2073
2074 /* The order of these bit tests must match the order that the
2075 relocs appear in. Since we sorted those by offset, we can
2076 predict them. */
2077
2078 /* Note that the numbers in, say, DSP6 are the bit offsets of
2079 the code fields that describe the operand. Bit numbering
2080 starts at 0 at the MSB of insn[0]. */
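/* (So, for example, the DSP6 field occupies bits 6-7 counted from that
   MSB, i.e. the two least significant bits of insn[0], hence the
   "insn[0] & 3" tests below; DSP4 occupies bits 4-5, hence
   "(insn[0] & 0x0c) >> 2".)  */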
2081
2082 /* DSP* codes:
2083 0 00 [reg]
2084 1 01 dsp:8[reg]
2085 2 10 dsp:16[reg]
2086 3 11 reg */
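/* (For example, a dsp:16 operand (code 2) whose scaled value fits in
   8 bits can be narrowed to dsp:8 (code 1), deleting one displacement
   byte; a dsp:8 of zero can drop to the plain [reg] form.)  */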
2087 if (irel->r_addend & RX_RELAXA_DSP6)
2088 {
2089 GET_RELOC;
2090
2091 code = insn[0] & 3;
2092 if (code == 2 && symval/scale <= 255)
2093 {
2094 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2095 insn[0] &= 0xfc;
2096 insn[0] |= 0x01;
2097 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2098 if (newrel != ELF32_R_TYPE (srel->r_info))
2099 {
2100 SNIP (3, 1, newrel);
2101 *again = TRUE;
2102 }
2103 }
2104
2105 else if (code == 1 && symval == 0)
2106 {
2107 insn[0] &= 0xfc;
2108 SNIP (2, 1, R_RX_NONE);
2109 *again = TRUE;
2110 }
2111
2112 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2113 else if (code == 1 && symval/scale <= 31
2114 /* Decodable bits. */
2115 && (insn[0] & 0xcc) == 0xcc
2116 /* Width. */
2117 && (insn[0] & 0x30) != 0x30
2118 /* Register MSBs. */
2119 && (insn[1] & 0x88) == 0x00)
2120 {
2121 int newrel = 0;
2122
2123 insn[0] = 0x88 | (insn[0] & 0x30);
2124 /* The register fields are in the right place already. */
2125
2126 /* We can't relax this new opcode. */
2127 irel->r_addend = 0;
2128
2129 switch ((insn[0] & 0x30) >> 4)
2130 {
2131 case 0:
2132 newrel = R_RX_RH_ABS5p5B;
2133 break;
2134 case 1:
2135 newrel = R_RX_RH_ABS5p5W;
2136 break;
2137 case 2:
2138 newrel = R_RX_RH_ABS5p5L;
2139 break;
2140 }
2141
2142 move_reloc (irel, srel, -2);
2143 SNIP (2, 1, newrel);
2144 }
2145
2146 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2147 else if (code == 1 && symval/scale <= 31
2148 /* Decodable bits. */
2149 && (insn[0] & 0xf8) == 0x58
2150 /* Register MSBs. */
2151 && (insn[1] & 0x88) == 0x00)
2152 {
2153 int newrel = 0;
2154
2155 insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
2156 /* The register fields are in the right place already. */
2157
2158 /* We can't relax this new opcode. */
2159 irel->r_addend = 0;
2160
2161 switch ((insn[0] & 0x08) >> 3)
2162 {
2163 case 0:
2164 newrel = R_RX_RH_ABS5p5B;
2165 break;
2166 case 1:
2167 newrel = R_RX_RH_ABS5p5W;
2168 break;
2169 }
2170
2171 move_reloc (irel, srel, -2);
2172 SNIP (2, 1, newrel);
2173 }
2174 }
2175
2176 /* A DSP4 operand always follows a DSP6 operand, even if there's
2177 no relocation for it. We have to read the code out of the
2178 opcode to calculate the offset of the operand. */
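/* (The DSP6 code tells us how long the first displacement is: codes 0
   and 3 carry none, so the DSP4 operand starts at offset 2; dsp:8 and
   dsp:16 push it out to offsets 3 and 4 respectively.)  */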
2179 if (irel->r_addend & RX_RELAXA_DSP4)
2180 {
2181 int code6, offset = 0;
2182
2183 GET_RELOC;
2184
2185 code6 = insn[0] & 0x03;
2186 switch (code6)
2187 {
2188 case 0: offset = 2; break;
2189 case 1: offset = 3; break;
2190 case 2: offset = 4; break;
2191 case 3: offset = 2; break;
2192 }
2193
2194 code = (insn[0] & 0x0c) >> 2;
2195
2196 if (code == 2 && symval / scale <= 255)
2197 {
2198 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2199
2200 insn[0] &= 0xf3;
2201 insn[0] |= 0x04;
2202 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2203 if (newrel != ELF32_R_TYPE (srel->r_info))
2204 {
2205 SNIP (offset+1, 1, newrel);
2206 *again = TRUE;
2207 }
2208 }
2209
2210 else if (code == 1 && symval == 0)
2211 {
2212 insn[0] &= 0xf3;
2213 SNIP (offset, 1, R_RX_NONE);
2214 *again = TRUE;
2215 }
2216 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2217 else if (code == 1 && symval/scale <= 31
2218 /* Decodable bits. */
2219 && (insn[0] & 0xc3) == 0xc3
2220 /* Width. */
2221 && (insn[0] & 0x30) != 0x30
2222 /* Register MSBs. */
2223 && (insn[1] & 0x88) == 0x00)
2224 {
2225 int newrel = 0;
2226
2227 insn[0] = 0x80 | (insn[0] & 0x30);
2228 /* The register fields are in the right place already. */
2229
2230 /* We can't relax this new opcode. */
2231 irel->r_addend = 0;
2232
2233 switch ((insn[0] & 0x30) >> 4)
2234 {
2235 case 0:
2236 newrel = R_RX_RH_ABS5p5B;
2237 break;
2238 case 1:
2239 newrel = R_RX_RH_ABS5p5W;
2240 break;
2241 case 2:
2242 newrel = R_RX_RH_ABS5p5L;
2243 break;
2244 }
2245
2246 move_reloc (irel, srel, -2);
2247 SNIP (2, 1, newrel);
2248 }
2249 }
2250
2251 /* These always occur alone, but the offset depends on whether
2252 it's a MEMEX opcode (0x06) or not. */
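/* (That is, the displacement byte we may delete sits at offset 3 in
   the MEMEX form and at offset 4 otherwise.)  */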
2253 if (irel->r_addend & RX_RELAXA_DSP14)
2254 {
2255 int offset;
2256 GET_RELOC;
2257
2258 if (insn[0] == 0x06)
2259 offset = 3;
2260 else
2261 offset = 4;
2262
2263 code = insn[1] & 3;
2264
2265 if (code == 2 && symval / scale <= 255)
2266 {
2267 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2268
2269 insn[1] &= 0xfc;
2270 insn[1] |= 0x01;
2271 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2272 if (newrel != ELF32_R_TYPE (srel->r_info))
2273 {
2274 SNIP (offset, 1, newrel);
2275 *again = TRUE;
2276 }
2277 }
2278 else if (code == 1 && symval == 0)
2279 {
2280 insn[1] &= 0xfc;
2281 SNIP (offset, 1, R_RX_NONE);
2282 *again = TRUE;
2283 }
2284 }
2285
2286 /* IMM* codes:
2287 0 00 imm:32
2288 1 01 simm:8
2289 2 10 simm:16
2290 3 11 simm:24. */
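/* (For example, an imm:32 (code 0) whose signed value fits in 24 bits
   becomes simm:24, a simm:24 that fits in 16 bits becomes simm:16, and
   so on; each step deletes one byte and moves the reloc down a size
   via next_smaller_reloc.)  */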
2291
2292 /* These always occur alone. */
2293 if (irel->r_addend & RX_RELAXA_IMM6)
2294 {
2295 long ssymval;
2296
2297 GET_RELOC;
2298
2299 /* These relocations sign-extend, so we must do signed compares. */
2300 ssymval = (long) symval;
2301
2302 code = insn[0] & 0x03;
2303
2304 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2305 {
2306 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2307
2308 insn[0] &= 0xfc;
2309 insn[0] |= 0x03;
2310 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2311 if (newrel != ELF32_R_TYPE (srel->r_info))
2312 {
2313 SNIP (2, 1, newrel);
2314 *again = TRUE;
2315 }
2316 }
2317
2318 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2319 {
2320 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2321
2322 insn[0] &= 0xfc;
2323 insn[0] |= 0x02;
2324 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2325 if (newrel != ELF32_R_TYPE (srel->r_info))
2326 {
2327 SNIP (2, 1, newrel);
2328 *again = TRUE;
2329 }
2330 }
2331
2332 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2333 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2334 /* Decodable bits. */
2335 && (insn[0] & 0xfc) == 0x74
2336 /* Decodable bits. */
2337 && ((insn[1] & 0xf0) == 0x00))
2338 {
2339 int newrel;
2340
2341 insn[0] = 0x75;
2342 insn[1] = 0x50 | (insn[1] & 0x0f);
2343
2344 /* We can't relax this new opcode. */
2345 irel->r_addend = 0;
2346
2347 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2348 newrel = R_RX_ABS8U;
2349 else
2350 newrel = R_RX_DIR8U;
2351
2352 SNIP (2, 1, newrel);
2353 *again = TRUE;
2354 }
2355
2356 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2357 {
2358 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2359
2360 insn[0] &= 0xfc;
2361 insn[0] |= 0x01;
2362 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2363 if (newrel != ELF32_R_TYPE (srel->r_info))
2364 {
2365 SNIP (2, 1, newrel);
2366 *again = TRUE;
2367 }
2368 }
2369
2370 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2371 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2372 /* Decodable bits and immediate type. */
2373 && insn[0] == 0x75
2374 /* Decodable bits. */
2375 && (insn[1] & 0xc0) == 0x00)
2376 {
2377 static const int newop[4] = { 1, 3, 4, 5 };
2378
2379 insn[0] = 0x60 | newop[insn[1] >> 4];
2380 /* The register number doesn't move. */
2381
2382 /* We can't relax this new opcode. */
2383 irel->r_addend = 0;
2384
2385 move_reloc (irel, srel, -1);
2386
2387 SNIP (2, 1, R_RX_RH_UIMM4p8);
2388 *again = TRUE;
2389 }
2390
2391 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2392 else if (code == 1 && ssymval <= 15 && ssymval >= -15
2393 /* Decodable bits and immediate type. */
2394 && insn[0] == 0x71
2395 /* Same register for source and destination. */
2396 && ((insn[1] >> 4) == (insn[1] & 0x0f)))
2397 {
2398 int newrel;
2399
2400 /* Note that we can't turn "add $0,Rs" into a NOP
2401 because the flags need to be set right. */
2402
2403 if (ssymval < 0)
2404 {
2405 insn[0] = 0x60; /* Subtract. */
2406 newrel = R_RX_RH_UNEG4p8;
2407 }
2408 else
2409 {
2410 insn[0] = 0x62; /* Add. */
2411 newrel = R_RX_RH_UIMM4p8;
2412 }
2413
2414 /* The register number is in the right place. */
2415
2416 /* We can't relax this new opcode. */
2417 irel->r_addend = 0;
2418
2419 move_reloc (irel, srel, -1);
2420
2421 SNIP (2, 1, newrel);
2422 *again = TRUE;
2423 }
2424 }
2425
2426 /* These are either matched with a DSP6 (2-byte base) or an id24
2427 (3-byte base). */
2428 if (irel->r_addend & RX_RELAXA_IMM12)
2429 {
2430 int dspcode, offset = 0;
2431 long ssymval;
2432
2433 GET_RELOC;
2434
2435 if ((insn[0] & 0xfc) == 0xfc)
2436 dspcode = 1; /* Just something with one byte operand. */
2437 else
2438 dspcode = insn[0] & 3;
2439 switch (dspcode)
2440 {
2441 case 0: offset = 2; break;
2442 case 1: offset = 3; break;
2443 case 2: offset = 4; break;
2444 case 3: offset = 2; break;
2445 }
2446
2447 /* These relocations sign-extend, so we must do signed compares. */
2448 ssymval = (long) symval;
2449
2450 code = (insn[1] >> 2) & 3;
2451 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2452 {
2453 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2454
2455 insn[1] &= 0xf3;
2456 insn[1] |= 0x0c;
2457 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2458 if (newrel != ELF32_R_TYPE (srel->r_info))
2459 {
2460 SNIP (offset, 1, newrel);
2461 *again = TRUE;
2462 }
2463 }
2464
2465 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2466 {
2467 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2468
2469 insn[1] &= 0xf3;
2470 insn[1] |= 0x08;
2471 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2472 if (newrel != ELF32_R_TYPE (srel->r_info))
2473 {
2474 SNIP (offset, 1, newrel);
2475 *again = TRUE;
2476 }
2477 }
2478
2479 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2480 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2481 /* Decodable bits. */
2482 && insn[0] == 0xfb
2483 /* Decodable bits. */
2484 && ((insn[1] & 0x03) == 0x02))
2485 {
2486 int newrel;
2487
2488 insn[0] = 0x75;
2489 insn[1] = 0x40 | (insn[1] >> 4);
2490
2491 /* We can't relax this new opcode. */
2492 irel->r_addend = 0;
2493
2494 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2495 newrel = R_RX_ABS8U;
2496 else
2497 newrel = R_RX_DIR8U;
2498
2499 SNIP (2, 1, newrel);
2500 *again = TRUE;
2501 }
2502
2503 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2504 {
2505 unsigned int newrel = ELF32_R_TYPE(srel->r_info);
2506
2507 insn[1] &= 0xf3;
2508 insn[1] |= 0x04;
2509 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2510 if (newrel != ELF32_R_TYPE(srel->r_info))
2511 {
2512 SNIP (offset, 1, newrel);
2513 *again = TRUE;
2514 }
2515 }
2516
2517 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2518 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2519 /* Decodable bits. */
2520 && insn[0] == 0xfb
2521 /* Decodable bits. */
2522 && ((insn[1] & 0x03) == 0x02))
2523 {
2524 insn[0] = 0x66;
2525 insn[1] = insn[1] >> 4;
2526
2527 /* We can't relax this new opcode. */
2528 irel->r_addend = 0;
2529
2530 move_reloc (irel, srel, -1);
2531
2532 SNIP (2, 1, R_RX_RH_UIMM4p8);
2533 *again = TRUE;
2534 }
2535 }
2536
2537 if (irel->r_addend & RX_RELAXA_BRA)
2538 {
2539 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2540 int max_pcrel3 = 4;
2541 int alignment_glue = 0;
2542
2543 GET_RELOC;
2544
2545 /* Branches over alignment chunks are problematic, as
2546 deleting bytes here makes the branch *further* away. We
2547 can be aggressive with branches within this alignment
2548 block, but not branches outside it. */
2549 if ((prev_alignment == NULL
2550 || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
2551 && (next_alignment == NULL
2552 || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
2553 alignment_glue = section_alignment_glue;
2554
2555 if (ELF32_R_TYPE(srel[1].r_info) == R_RX_RH_RELAX
2556 && srel[1].r_addend & RX_RELAXA_BRA
2557 && srel[1].r_offset < irel->r_offset + pcrel)
2558 max_pcrel3 ++;
2559
2560 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2561
2562 /* The values we compare PCREL with are not what you'd
2563 expect; they're off by a little to compensate for (1)
2564 where the reloc is relative to the insn, and (2) how much
2565 the insn is going to change when we relax it. */
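/* (Hence, for instance, the 24-to-16-bit BRA case below tests against
   32765 rather than 32767; presumably slack for the byte(s) removed by
   the relaxation itself.)  */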
2566
2567 /* These we have to decode. */
2568 switch (insn[0])
2569 {
2570 case 0x04: /* BRA pcdsp:24 */
2571 if (-32768 + alignment_glue <= pcrel
2572 && pcrel <= 32765 - alignment_glue)
2573 {
2574 insn[0] = 0x38;
2575 SNIP (3, 1, newrel);
2576 *again = TRUE;
2577 }
2578 break;
2579
2580 case 0x38: /* BRA pcdsp:16 */
2581 if (-128 + alignment_glue <= pcrel
2582 && pcrel <= 127 - alignment_glue)
2583 {
2584 insn[0] = 0x2e;
2585 SNIP (2, 1, newrel);
2586 *again = TRUE;
2587 }
2588 break;
2589
2590 case 0x2e: /* BRA pcdsp:8 */
2591 /* Note that there's a risk here of shortening things so
2592 much that we no longer fit this reloc; it *should*
2593 only happen when you branch across a branch, and that
2594 branch also devolves into BRA.S. "Real" code should
2595 be OK. */
2596 if (max_pcrel3 + alignment_glue <= pcrel
2597 && pcrel <= 10 - alignment_glue
2598 && allow_pcrel3)
2599 {
2600 insn[0] = 0x08;
2601 SNIP (1, 1, newrel);
2602 move_reloc (irel, srel, -1);
2603 *again = TRUE;
2604 }
2605 break;
2606
2607 case 0x05: /* BSR pcdsp:24 */
2608 if (-32768 + alignment_glue <= pcrel
2609 && pcrel <= 32765 - alignment_glue)
2610 {
2611 insn[0] = 0x39;
2612 SNIP (1, 1, newrel);
2613 *again = TRUE;
2614 }
2615 break;
2616
2617 case 0x3a: /* BEQ.W pcdsp:16 */
2618 case 0x3b: /* BNE.W pcdsp:16 */
2619 if (-128 + alignment_glue <= pcrel
2620 && pcrel <= 127 - alignment_glue)
2621 {
2622 insn[0] = 0x20 | (insn[0] & 1);
2623 SNIP (1, 1, newrel);
2624 *again = TRUE;
2625 }
2626 break;
2627
2628 case 0x20: /* BEQ.B pcdsp:8 */
2629 case 0x21: /* BNE.B pcdsp:8 */
2630 if (max_pcrel3 + alignment_glue <= pcrel
2631 && pcrel - alignment_glue <= 10
2632 && allow_pcrel3)
2633 {
2634 insn[0] = 0x10 | ((insn[0] & 1) << 3);
2635 SNIP (1, 1, newrel);
2636 move_reloc (irel, srel, -1);
2637 *again = TRUE;
2638 }
2639 break;
2640
2641 case 0x16: /* synthetic BNE dsp24 */
2642 case 0x1e: /* synthetic BEQ dsp24 */
2643 if (-32767 + alignment_glue <= pcrel
2644 && pcrel <= 32766 - alignment_glue
2645 && insn[1] == 0x04)
2646 {
2647 if (insn[0] == 0x16)
2648 insn[0] = 0x3b;
2649 else
2650 insn[0] = 0x3a;
2651 /* We snip out the bytes at the end, otherwise the reloc
2652 would get moved too, and by too much. */
2653 SNIP (3, 2, newrel);
2654 move_reloc (irel, srel, -1);
2655 *again = TRUE;
2656 }
2657 break;
2658 }
2659
2660 /* Special case - synthetic conditional branches, pcrel24.
2661 Note that EQ and NE have been handled above. */
2662 if ((insn[0] & 0xf0) == 0x20
2663 && insn[1] == 0x06
2664 && insn[2] == 0x04
2665 && srel->r_offset != irel->r_offset + 1
2666 && -32767 + alignment_glue <= pcrel
2667 && pcrel <= 32766 - alignment_glue)
2668 {
2669 insn[1] = 0x05;
2670 insn[2] = 0x38;
2671 SNIP (5, 1, newrel);
2672 *again = TRUE;
2673 }
2674
2675 /* Special case - synthetic conditional branches, pcrel16 */
2676 if ((insn[0] & 0xf0) == 0x20
2677 && insn[1] == 0x05
2678 && insn[2] == 0x38
2679 && srel->r_offset != irel->r_offset + 1
2680 && -127 + alignment_glue <= pcrel
2681 && pcrel <= 126 - alignment_glue)
2682 {
2683 int cond = (insn[0] & 0x0f) ^ 0x01;
2684
2685 insn[0] = 0x20 | cond;
2686 /* By moving the reloc first, we avoid having
2687 delete_bytes move it also. */
2688 move_reloc (irel, srel, -2);
2689 SNIP (2, 3, newrel);
2690 *again = TRUE;
2691 }
2692 }
2693
2694 BFD_ASSERT (nrelocs == 0);
2695
2696 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2697 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2698 because it may have one or two relocations. */
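/* Roughly: work out the displacement and the immediate (each may come
   from a reloc or be encoded directly in the insn); if the immediate
   fits in 8 unsigned bits, the base register is r0..r7 and the scaled
   displacement fits in 5 bits, rewrite to the short three-byte form
   and delete the remaining bytes.  */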
2699 if ((insn[0] & 0xfc) == 0xf8
2700 && (insn[1] & 0x80) == 0x00
2701 && (insn[0] & 0x03) != 0x03)
2702 {
2703 int dcode, icode, reg, ioff, dscale, ilen;
2704 bfd_vma disp_val = 0;
2705 long imm_val = 0;
2706 Elf_Internal_Rela * disp_rel = 0;
2707 Elf_Internal_Rela * imm_rel = 0;
2708
2709 /* Reset this. */
2710 srel = irel;
2711
2712 dcode = insn[0] & 0x03;
2713 icode = (insn[1] >> 2) & 0x03;
2714 reg = (insn[1] >> 4) & 0x0f;
2715
2716 ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;
2717
2718 /* Figure out what the displacement is. */
2719 if (dcode == 1 || dcode == 2)
2720 {
2721 /* There's a displacement. See if there's a reloc for it. */
2722 if (srel[1].r_offset == irel->r_offset + 2)
2723 {
2724 GET_RELOC;
2725 disp_val = symval;
2726 disp_rel = srel;
2727 }
2728 else
2729 {
2730 if (dcode == 1)
2731 disp_val = insn[2];
2732 else
2733 {
2734 #if RX_OPCODE_BIG_ENDIAN
2735 disp_val = insn[2] * 256 + insn[3];
2736 #else
2737 disp_val = insn[2] + insn[3] * 256;
2738 #endif
2739 }
2740 switch (insn[1] & 3)
2741 {
2742 case 1:
2743 disp_val *= 2;
2744 scale = 2;
2745 break;
2746 case 2:
2747 disp_val *= 4;
2748 scale = 4;
2749 break;
2750 }
2751 }
2752 }
2753
2754 dscale = scale;
2755
2756 /* Figure out what the immediate is. */
2757 if (srel[1].r_offset == irel->r_offset + ioff)
2758 {
2759 GET_RELOC;
2760 imm_val = (long) symval;
2761 imm_rel = srel;
2762 }
2763 else
2764 {
2765 unsigned char * ip = insn + ioff;
2766
2767 switch (icode)
2768 {
2769 case 1:
2770 /* For byte writes, we don't sign extend. Makes the math easier later. */
2771 if (scale == 1)
2772 imm_val = ip[0];
2773 else
2774 imm_val = (char) ip[0];
2775 break;
2776 case 2:
2777 #if RX_OPCODE_BIG_ENDIAN
2778 imm_val = ((char) ip[0] << 8) | ip[1];
2779 #else
2780 imm_val = ((char) ip[1] << 8) | ip[0];
2781 #endif
2782 break;
2783 case 3:
2784 #if RX_OPCODE_BIG_ENDIAN
2785 imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
2786 #else
2787 imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
2788 #endif
2789 break;
2790 case 0:
2791 #if RX_OPCODE_BIG_ENDIAN
2792 imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
2793 #else
2794 imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
2795 #endif
2796 break;
2797 }
2798 }
2799
2800 ilen = 2;
2801
2802 switch (dcode)
2803 {
2804 case 1:
2805 ilen += 1;
2806 break;
2807 case 2:
2808 ilen += 2;
2809 break;
2810 }
2811
2812 switch (icode)
2813 {
2814 case 1:
2815 ilen += 1;
2816 break;
2817 case 2:
2818 ilen += 2;
2819 break;
2820 case 3:
2821 ilen += 3;
2822 break;
2823 case 4:
2824 ilen += 4;
2825 break;
2826 }
2827
2828 /* The shortcut happens when the immediate is 0..255,
2829 register r0 to r7, and displacement (scaled) 0..31. */
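/* (In the short form below, insn[0] becomes 0x3c/0x3d/0x3e according
   to the operand size; insn[1] packs the 5-bit scaled displacement,
   top bit in bit 7 and low four bits in bits 0-3, around the register
   in bits 4-6; insn[2] holds the 8-bit immediate.)  */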
2830
2831 if (0 <= imm_val && imm_val <= 255
2832 && 0 <= reg && reg <= 7
2833 && disp_val / dscale <= 31)
2834 {
2835 insn[0] = 0x3c | (insn[1] & 0x03);
2836 insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val/dscale) & 0x0f);
2837 insn[2] = imm_val;
2838
2839 if (disp_rel)
2840 {
2841 int newrel = R_RX_NONE;
2842
2843 switch (dscale)
2844 {
2845 case 1:
2846 newrel = R_RX_RH_ABS5p8B;
2847 break;
2848 case 2:
2849 newrel = R_RX_RH_ABS5p8W;
2850 break;
2851 case 4:
2852 newrel = R_RX_RH_ABS5p8L;
2853 break;
2854 }
2855 disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
2856 move_reloc (irel, disp_rel, -1);
2857 }
2858 if (imm_rel)
2859 {
2860 imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
2861 move_reloc (disp_rel ? disp_rel : irel,
2862 imm_rel,
2863 irel->r_offset - imm_rel->r_offset + 2);
2864 }
2865
2866 SNIPNR (3, ilen - 3);
2867 *again = TRUE;
2868
2869 /* We can't relax this new opcode. */
2870 irel->r_addend = 0;
2871 }
2872 }
2873 }
2874
2875 /* We can't reliably relax branches to DIR3U_PCREL unless we know
2876 whatever they're branching over won't shrink any more. If we're
2877 basically done here, do one more pass just for branches - but
2878 don't request a pass after that one! */
2879 if (!*again && !allow_pcrel3)
2880 {
2881 bfd_boolean ignored;
2882
2883 elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
2884 }
2885
2886 return TRUE;
2887
2888 error_return:
2889 if (free_relocs != NULL)
2890 free (free_relocs);
2891
2892 if (free_contents != NULL)
2893 free (free_contents);
2894
2895 if (shndx_buf != NULL)
2896 {
2897 shndx_hdr->contents = NULL;
2898 free (shndx_buf);
2899 }
2900
2901 if (free_intsyms != NULL)
2902 free (free_intsyms);
2903
2904 return FALSE;
2905 }
2906
2907 static bfd_boolean
2908 elf32_rx_relax_section_wrapper (bfd * abfd,
2909 asection * sec,
2910 struct bfd_link_info * link_info,
2911 bfd_boolean * again)
2912 {
2913 return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
2914 }
2915 \f
2916 /* Function to set the ELF flag bits. */
2917
2918 static bfd_boolean
2919 rx_elf_set_private_flags (bfd * abfd, flagword flags)
2920 {
2921 elf_elfheader (abfd)->e_flags = flags;
2922 elf_flags_init (abfd) = TRUE;
2923 return TRUE;
2924 }
2925
2926 static bfd_boolean no_warn_mismatch = FALSE;
2927 static bfd_boolean ignore_lma = TRUE;
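/* These are only defaults; front ends such as the linker are expected
   to override them through bfd_elf32_rx_set_target_flags below.  */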
2928
2929 void bfd_elf32_rx_set_target_flags (bfd_boolean, bfd_boolean);
2930
2931 void
2932 bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch,
2933 bfd_boolean user_ignore_lma)
2934 {
2935 no_warn_mismatch = user_no_warn_mismatch;
2936 ignore_lma = user_ignore_lma;
2937 }
2938
2939 /* Converts FLAGS into a descriptive string.
2940 Returns a static pointer. */
2941
2942 static const char *
2943 describe_flags (flagword flags)
2944 {
2945 static char buf [128];
2946
2947 buf[0] = 0;
2948
2949 if (flags & E_FLAG_RX_64BIT_DOUBLES)
2950 strcat (buf, "64-bit doubles");
2951 else
2952 strcat (buf, "32-bit doubles");
2953
2954 if (flags & E_FLAG_RX_DSP)
2955 strcat (buf, ", dsp");
2956 else
2957 strcat (buf, ", no dsp");
2958
2959 if (flags & E_FLAG_RX_PID)
2960 strcat (buf, ", pid");
2961 else
2962 strcat (buf, ", no pid");
2963
2964 if (flags & E_FLAG_RX_ABI)
2965 strcat (buf, ", RX ABI");
2966 else
2967 strcat (buf, ", GCC ABI");
2968
2969 return buf;
2970 }
2971
2972 /* Merge backend specific data from an object file to the output
2973 object file when linking. */
2974
2975 static bfd_boolean
2976 rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
2977 {
2978 flagword old_flags;
2979 flagword new_flags;
2980 bfd_boolean error = FALSE;
2981
2982 new_flags = elf_elfheader (ibfd)->e_flags;
2983 old_flags = elf_elfheader (obfd)->e_flags;
2984
2985 if (!elf_flags_init (obfd))
2986 {
2987 /* First call, no flags set. */
2988 elf_flags_init (obfd) = TRUE;
2989 elf_elfheader (obfd)->e_flags = new_flags;
2990 }
2991 else if (old_flags != new_flags)
2992 {
2993 flagword known_flags;
2994
2995 known_flags = E_FLAG_RX_ABI | E_FLAG_RX_64BIT_DOUBLES
2996 | E_FLAG_RX_DSP | E_FLAG_RX_PID;
2997
2998 if ((old_flags ^ new_flags) & known_flags)
2999 {
3000 /* Only complain if flag bits we care about do not match.
3001 Other bits may be set, since older binaries did use some
3002 deprecated flags. */
3003 if (no_warn_mismatch)
3004 {
3005 elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
3006 }
3007 else
3008 {
3009 _bfd_error_handler ("There is a conflict merging the ELF header flags from %s",
3010 bfd_get_filename (ibfd));
3011 _bfd_error_handler (" the input file's flags: %s",
3012 describe_flags (new_flags));
3013 _bfd_error_handler (" the output file's flags: %s",
3014 describe_flags (old_flags));
3015 error = TRUE;
3016 }
3017 }
3018 else
3019 elf_elfheader (obfd)->e_flags = new_flags & known_flags;
3020 }
3021
3022 if (error)
3023 bfd_set_error (bfd_error_bad_value);
3024
3025 return !error;
3026 }
3027 \f
3028 static bfd_boolean
3029 rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
3030 {
3031 FILE * file = (FILE *) ptr;
3032 flagword flags;
3033
3034 BFD_ASSERT (abfd != NULL && ptr != NULL);
3035
3036 /* Print normal ELF private data. */
3037 _bfd_elf_print_private_bfd_data (abfd, ptr);
3038
3039 flags = elf_elfheader (abfd)->e_flags;
3040 fprintf (file, _("private flags = 0x%lx:"), (long) flags);
3041
3042 fprintf (file, "%s", describe_flags (flags));
3043 return TRUE;
3044 }
3045
3046 /* Return the MACH for an e_flags value. */
3047
3048 static int
3049 elf32_rx_machine (bfd * abfd ATTRIBUTE_UNUSED)
3050 {
3051 #if 0 /* FIXME: EF_RX_CPU_MASK collides with E_FLAG_RX_...
3052 Need to sort out how these flag bits are used.
3053 For now we assume that the flags are OK. */
3054 if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
3055 #endif
3056 return bfd_mach_rx;
3057
3058 return 0;
3059 }
3060
3061 static bfd_boolean
3062 rx_elf_object_p (bfd * abfd)
3063 {
3064 int i;
3065 unsigned int u;
3066 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
3067 int nphdrs = elf_elfheader (abfd)->e_phnum;
3068 sec_ptr bsec;
3069 static int saw_be = FALSE;
3070
3071 /* We never want to automatically choose the non-swapping big-endian
3072 target. The user can only get that explicitly, such as with -I
3073 and objcopy. */
3074 if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
3075 && abfd->target_defaulted)
3076 return FALSE;
3077
3078 /* BFD->target_defaulted is not set to TRUE when a target is chosen
3079 as a fallback, so we check for "scanning" to know when to stop
3080 using the non-swapping target. */
3081 if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
3082 && saw_be)
3083 return FALSE;
3084 if (abfd->xvec == &bfd_elf32_rx_be_vec)
3085 saw_be = TRUE;
3086
3087 bfd_default_set_arch_mach (abfd, bfd_arch_rx,
3088 elf32_rx_machine (abfd));
3089
3090 /* For each PHDR in the object, we must find some section that
3091 corresponds (based on matching file offsets) and use its VMA
3092 information to reconstruct the p_vaddr field we clobbered when we
3093 wrote it out. */
3094 for (i=0; i<nphdrs; i++)
3095 {
3096 for (u=0; u<elf_tdata(abfd)->num_elf_sections; u++)
3097 {
3098 Elf_Internal_Shdr *sec = elf_tdata(abfd)->elf_sect_ptr[u];
3099
3100 if (phdr[i].p_filesz
3101 && phdr[i].p_offset <= (bfd_vma) sec->sh_offset
3102 && (bfd_vma)sec->sh_offset <= phdr[i].p_offset + (phdr[i].p_filesz - 1))
3103 {
3104 /* Found one! The difference between the two addresses,
3105 plus the difference between the two file offsets, is
3106 enough information to reconstruct the lma. */
3107
3108 /* Example where they aren't:
3109 PHDR[1] = lma fffc0100 offset 00002010 size 00000100
3110 SEC[6] = vma 00000050 offset 00002050 size 00000040
3111
3112 The correct LMA for the section is fffc0100 + (2050-2010), i.e. fffc0140.
3113 */
3114
3115 phdr[i].p_vaddr = sec->sh_addr + (sec->sh_offset - phdr[i].p_offset);
3116 break;
3117 }
3118 }
3119
3120 /* We must update the bfd sections as well, so we don't stop
3121 with one match. */
3122 bsec = abfd->sections;
3123 while (bsec)
3124 {
3125 if (phdr[i].p_filesz
3126 && phdr[i].p_vaddr <= bsec->vma
3127 && bsec->vma <= phdr[i].p_vaddr + (phdr[i].p_filesz - 1))
3128 {
3129 bsec->lma = phdr[i].p_paddr + (bsec->vma - phdr[i].p_vaddr);
3130 }
3131 bsec = bsec->next;
3132 }
3133 }
3134
3135 return TRUE;
3136 }
3137 \f
3138
3139 #ifdef DEBUG
3140 void
3141 rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
3142 {
3143 size_t locsymcount;
3144 Elf_Internal_Sym * isymbuf;
3145 Elf_Internal_Sym * isymend;
3146 Elf_Internal_Sym * isym;
3147 Elf_Internal_Shdr * symtab_hdr;
3148 bfd_boolean free_internal = FALSE, free_external = FALSE;
3149 char * st_info_str;
3150 char * st_info_stb_str;
3151 char * st_other_str;
3152 char * st_shndx_str;
3153
3154 if (! internal_syms)
3155 {
3156 internal_syms = bfd_malloc (1000);
3157 free_internal = 1;
3158 }
3159 if (! external_syms)
3160 {
3161 external_syms = bfd_malloc (1000);
3162 free_external = 1;
3163 }
3164
3165 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3166 locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
3167 if (free_internal)
3168 isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
3169 symtab_hdr->sh_info, 0,
3170 internal_syms, external_syms, NULL);
3171 else
3172 isymbuf = internal_syms;
3173 isymend = isymbuf + locsymcount;
3174
3175 for (isym = isymbuf ; isym < isymend ; isym++)
3176 {
3177 switch (ELF_ST_TYPE (isym->st_info))
3178 {
3179 case STT_FUNC: st_info_str = "STT_FUNC"; break;
3180 case STT_SECTION: st_info_str = "STT_SECTION"; break;
3181 case STT_FILE: st_info_str = "STT_FILE"; break;
3182 case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
3183 case STT_TLS: st_info_str = "STT_TLS"; break;
3184 default: st_info_str = "";
3185 }
3186 switch (ELF_ST_BIND (isym->st_info))
3187 {
3188 case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
3189 case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
3190 default: st_info_stb_str = "";
3191 }
3192 switch (ELF_ST_VISIBILITY (isym->st_other))
3193 {
3194 case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
3195 case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
3196 case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
3197 default: st_other_str = "";
3198 }
3199 switch (isym->st_shndx)
3200 {
3201 case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
3202 case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
3203 case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
3204 default: st_shndx_str = "";
3205 }
3206
3207 printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
3208 "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
3209 isym,
3210 (unsigned long) isym->st_value,
3211 (unsigned long) isym->st_size,
3212 isym->st_name,
3213 bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
3214 isym->st_name),
3215 isym->st_info, st_info_str, st_info_stb_str,
3216 isym->st_other, st_other_str,
3217 isym->st_shndx, st_shndx_str);
3218 }
3219 if (free_internal)
3220 free (internal_syms);
3221 if (free_external)
3222 free (external_syms);
3223 }
3224
3225 char *
3226 rx_get_reloc (long reloc)
3227 {
3228 if (0 <= reloc && reloc < R_RX_max)
3229 return rx_elf_howto_table[reloc].name;
3230 return "";
3231 }
3232 #endif /* DEBUG */
3233
3234 \f
3235 /* We must take care to keep the on-disk copy of any fully linked
3236 code sections byte-swapped if the target is big endian, to match
3237 the Renesas tools. */
3238
3239 /* The rule is: big-endian objects that are final-link executables
3240 have their code sections stored with 32-bit words swapped relative
3241 to what you'd get by default. */
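/* (Concretely: within each aligned 32-bit word, bytes b0 b1 b2 b3 of
   the "normal" image appear on disk as b3 b2 b1 b0. The hooks below
   perform that swap by reading each word as little-endian and writing
   it back big-endian, and vice versa.)  */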
3242
3243 static bfd_boolean
3244 rx_get_section_contents (bfd * abfd,
3245 sec_ptr section,
3246 void * location,
3247 file_ptr offset,
3248 bfd_size_type count)
3249 {
3250 int exec = (abfd->flags & EXEC_P) ? 1 : 0;
3251 int s_code = (section->flags & SEC_CODE) ? 1 : 0;
3252 bfd_boolean rv;
3253
3254 #ifdef DJDEBUG
3255 fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
3256 (long) offset, (long) count, section->name,
3257 bfd_big_endian(abfd) ? "be" : "le",
3258 exec, s_code, (long unsigned) section->filepos,
3259 (long unsigned) offset);
3260 #endif
3261
3262 if (exec && s_code && bfd_big_endian (abfd))
3263 {
3264 char * cloc = (char *) location;
3265 bfd_size_type cnt, end_cnt;
3266
3267 rv = TRUE;
3268
3269 /* Fetch and swap unaligned bytes at the beginning. */
3270 if (offset % 4)
3271 {
3272 char buf[4];
3273
3274 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3275 (offset & -4), 4);
3276 if (!rv)
3277 return FALSE;
3278
3279 bfd_putb32 (bfd_getl32 (buf), buf);
3280
3281 cnt = 4 - (offset % 4);
3282 if (cnt > count)
3283 cnt = count;
3284
3285 memcpy (location, buf + (offset % 4), cnt);
3286
3287 count -= cnt;
3288 offset += cnt;
3289 cloc += cnt;
3290 }
3291
3292 end_cnt = count % 4;
3293
3294 /* Fetch and swap the middle bytes. */
3295 if (count >= 4)
3296 {
3297 rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
3298 count - end_cnt);
3299 if (!rv)
3300 return FALSE;
3301
3302 for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
3303 bfd_putb32 (bfd_getl32 (cloc), cloc);
3304 }
3305
3306 /* Fetch and swap the end bytes. */
3307 if (end_cnt > 0)
3308 {
3309 char buf[4];
3310
3311 /* Fetch the end bytes. */
3312 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3313 offset + count - end_cnt, 4);
3314 if (!rv)
3315 return FALSE;
3316
3317 bfd_putb32 (bfd_getl32 (buf), buf);
3318 memcpy (cloc, buf, end_cnt);
3319 }
3320 }
3321 else
3322 rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);
3323
3324 return rv;
3325 }
3326
3327 #ifdef DJDEBUG
3328 static bfd_boolean
3329 rx2_set_section_contents (bfd * abfd,
3330 sec_ptr section,
3331 const void * location,
3332 file_ptr offset,
3333 bfd_size_type count)
3334 {
3335 bfd_size_type i;
3336
3337 fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
3338 section->name, (unsigned) section->vma, location, (int) offset, (int) count);
3339 for (i = 0; i < count; i++)
3340 {
3341 if (i % 16 == 0 && i > 0)
3342 fprintf (stderr, "\n");
3343
3344 if (i % 16 && i % 4 == 0)
3345 fprintf (stderr, " ");
3346
3347 if (i % 16 == 0)
3348 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3349
3350 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3351 }
3352 fprintf (stderr, "\n");
3353
3354 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3355 }
3356 #define _bfd_elf_set_section_contents rx2_set_section_contents
3357 #endif
3358
3359 static bfd_boolean
3360 rx_set_section_contents (bfd * abfd,
3361 sec_ptr section,
3362 const void * location,
3363 file_ptr offset,
3364 bfd_size_type count)
3365 {
3366 bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
3367 bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
3368 bfd_boolean rv;
3369 char * swapped_data = NULL;
3370 bfd_size_type i;
3371 bfd_vma caddr = section->vma + offset;
3372 file_ptr faddr = 0;
3373 bfd_size_type scount;
3374
3375 #ifdef DJDEBUG
3377
3378 fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
3379 (long) offset, (long) count, section->name,
3380 bfd_big_endian (abfd) ? "be" : "le",
3381 exec, s_code);
3382
3383 for (i = 0; i < count; i++)
3384 {
3385 int a = section->vma + offset + i;
3386
3387 if (a % 16 == 0 && a > 0)
3388 fprintf (stderr, "\n");
3389
3390 if (a % 16 && a % 4 == 0)
3391 fprintf (stderr, " ");
3392
3393 if (a % 16 == 0 || i == 0)
3394 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3395
3396 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3397 }
3398
3399 fprintf (stderr, "\n");
3400 #endif
3401
3402 if (! exec || ! s_code || ! bfd_big_endian (abfd))
3403 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3404
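/* Otherwise write in three steps: leading bytes up to a 4-byte
   boundary one at a time (at byte-mirrored file offsets within their
   word), then whole words with their four bytes reversed, then any
   trailing bytes again one at a time.  */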
3405 while (count > 0 && caddr > 0 && caddr % 4)
3406 {
3407 switch (caddr % 4)
3408 {
3409 case 0: faddr = offset + 3; break;
3410 case 1: faddr = offset + 1; break;
3411 case 2: faddr = offset - 1; break;
3412 case 3: faddr = offset - 3; break;
3413 }
3414
3415 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3416 if (! rv)
3417 return rv;
3418
3419 location ++;
3420 offset ++;
3421 count --;
3422 caddr ++;
3423 }
3424
3425 scount = (int)(count / 4) * 4;
3426 if (scount > 0)
3427 {
3428 char * cloc = (char *) location;
3429
3430 swapped_data = (char *) bfd_alloc (abfd, count);
3431
3432 for (i = 0; i < count; i += 4)
3433 {
3434 bfd_vma v = bfd_getl32 (cloc + i);
3435 bfd_putb32 (v, swapped_data + i);
3436 }
3437
3438 rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
3439
3440 if (!rv)
3441 return rv;
3442 }
3443
3444 count -= scount;
3445 location += scount;
3446 offset += scount;
3447
3448 if (count > 0)
3449 {
3450 caddr = section->vma + offset;
3451 while (count > 0)
3452 {
3453 switch (caddr % 4)
3454 {
3455 case 0: faddr = offset + 3; break;
3456 case 1: faddr = offset + 1; break;
3457 case 2: faddr = offset - 1; break;
3458 case 3: faddr = offset - 3; break;
3459 }
3460 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3461 if (! rv)
3462 return rv;
3463
3464 location ++;
3465 offset ++;
3466 count --;
3467 caddr ++;
3468 }
3469 }
3470
3471 return TRUE;
3472 }
3473
3474 static bfd_boolean
3475 rx_final_link (bfd * abfd, struct bfd_link_info * info)
3476 {
3477 asection * o;
3478
3479 for (o = abfd->sections; o != NULL; o = o->next)
3480 {
3481 #ifdef DJDEBUG
3482 fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
3483 o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
3484 #endif
3485 if (o->flags & SEC_CODE
3486 && bfd_big_endian (abfd)
3487 && o->size % 4)
3488 {
3489 #ifdef DJDEBUG
3490 fprintf (stderr, "adjusting...\n");
3491 #endif
3492 o->size += 4 - (o->size % 4);
3493 }
3494 }
3495
3496 return bfd_elf_final_link (abfd, info);
3497 }
3498
3499 static bfd_boolean
3500 elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
3501 struct bfd_link_info * info ATTRIBUTE_UNUSED)
3502 {
3503 const struct elf_backend_data * bed;
3504 struct elf_obj_tdata * tdata;
3505 Elf_Internal_Phdr * phdr;
3506 unsigned int count;
3507 unsigned int i;
3508
3509 bed = get_elf_backend_data (abfd);
3510 tdata = elf_tdata (abfd);
3511 phdr = tdata->phdr;
3512 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
3513
3514 if (ignore_lma)
3515 for (i = count; i-- != 0;)
3516 if (phdr[i].p_type == PT_LOAD)
3517 {
3518 /* The Renesas tools expect p_paddr to be zero. However,
3519 there is no other way to store the writable data in ROM for
3520 startup initialization. So, we let the linker *think*
3521 we're using paddr and vaddr the "usual" way, but at the
3522 last minute we move the paddr into the vaddr (which is what
3523 the simulator uses) and zero out paddr. Note that this
3524 does not affect the section headers, just the program
3525 headers. We hope. */
3526 phdr[i].p_vaddr = phdr[i].p_paddr;
3527 #if 0 /* If we zero out p_paddr, then the LMA in the section table
3528 becomes wrong. */
3529 phdr[i].p_paddr = 0;
3530 #endif
3531 }
3532
3533 return TRUE;
3534 }
3535
3536 /* The default literal sections should always be marked as "code" (i.e.,
3537 SHF_EXECINSTR). This is particularly important for big-endian mode,
3538 where we do not want their contents byte reversed. */
3539 static const struct bfd_elf_special_section elf32_rx_special_sections[] =
3540 {
3541 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3542 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3543 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3544 { NULL, 0, 0, 0, 0 }
3545 };
3546 \f
3547 #define ELF_ARCH bfd_arch_rx
3548 #define ELF_MACHINE_CODE EM_RX
3549 #define ELF_MAXPAGESIZE 0x1000
3550
3551 #define TARGET_BIG_SYM bfd_elf32_rx_be_vec
3552 #define TARGET_BIG_NAME "elf32-rx-be"
3553
3554 #define TARGET_LITTLE_SYM bfd_elf32_rx_le_vec
3555 #define TARGET_LITTLE_NAME "elf32-rx-le"
3556
3557 #define elf_info_to_howto_rel NULL
3558 #define elf_info_to_howto rx_info_to_howto_rela
3559 #define elf_backend_object_p rx_elf_object_p
3560 #define elf_backend_relocate_section rx_elf_relocate_section
3561 #define elf_symbol_leading_char ('_')
3562 #define elf_backend_can_gc_sections 1
3563 #define elf_backend_modify_program_headers elf32_rx_modify_program_headers
3564
3565 #define bfd_elf32_bfd_reloc_type_lookup rx_reloc_type_lookup
3566 #define bfd_elf32_bfd_reloc_name_lookup rx_reloc_name_lookup
3567 #define bfd_elf32_bfd_set_private_flags rx_elf_set_private_flags
3568 #define bfd_elf32_bfd_merge_private_bfd_data rx_elf_merge_private_bfd_data
3569 #define bfd_elf32_bfd_print_private_bfd_data rx_elf_print_private_bfd_data
3570 #define bfd_elf32_get_section_contents rx_get_section_contents
3571 #define bfd_elf32_set_section_contents rx_set_section_contents
3572 #define bfd_elf32_bfd_final_link rx_final_link
3573 #define bfd_elf32_bfd_relax_section elf32_rx_relax_section_wrapper
3574 #define elf_backend_special_sections elf32_rx_special_sections
3575
3576 #include "elf32-target.h"
3577
3578 /* We define a second big-endian target that doesn't have the custom
3579 section get/set hooks, for times when we want to preserve the
3580 pre-swapped .text sections (like objcopy). */
3581
3582 #undef TARGET_BIG_SYM
3583 #define TARGET_BIG_SYM bfd_elf32_rx_be_ns_vec
3584 #undef TARGET_BIG_NAME
3585 #define TARGET_BIG_NAME "elf32-rx-be-ns"
3586 #undef TARGET_LITTLE_SYM
3587
3588 #undef bfd_elf32_get_section_contents
3589 #undef bfd_elf32_set_section_contents
3590
3591 #undef elf32_bed
3592 #define elf32_bed elf32_rx_be_ns_bed
3593
3594 #include "elf32-target.h"