1 /* Renesas RX specific support for 32-bit ELF.
2 Copyright (C) 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
 19    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfd_stdint.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/rx.h"
27 #include "libiberty.h"
28
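/* Compile-time switch for the byte order used when patching operand
   fields embedded in opcodes: the "#if RX_OPCODE_BIG_ENDIAN" blocks
   below would store the most significant byte first, the "#else"
   blocks store it last.  It is left at 0, so opcode operands are
   always patched LSB-first.  */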
29 #define RX_OPCODE_BIG_ENDIAN 0
30
31 /* This is a meta-target that's used only with objcopy, to avoid the
32 endian-swap we would otherwise get. We check for this in
33 rx_elf_object_p(). */
34 const bfd_target bfd_elf32_rx_be_ns_vec;
35 const bfd_target bfd_elf32_rx_be_vec;
36
37 #ifdef DEBUG
38 char * rx_get_reloc (long);
39 void rx_dump_symtab (bfd *, void *, void *);
40 #endif
41
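/* Shorthand for filling in the howto table below.  SZ is the BFD howto
   size code (0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes), BIT the number of
   significant bits, SHIFT the right shift applied to the value,
   COMPLAIN the overflow check to use, and PCREL whether the relocation
   is PC-relative.  */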
42 #define RXREL(n,sz,bit,shift,complain,pcrel) \
43 HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
44 bfd_elf_generic_reloc, "R_RX_" #n, FALSE, 0, ~0, FALSE)
45
46 /* Note that the relocations around 0x7f are internal to this file;
47 feel free to move them as needed to avoid conflicts with published
48 relocation numbers. */
49
50 static reloc_howto_type rx_elf_howto_table [] =
51 {
52 RXREL (NONE, 0, 0, 0, dont, FALSE),
53 RXREL (DIR32, 2, 32, 0, signed, FALSE),
54 RXREL (DIR24S, 2, 24, 0, signed, FALSE),
55 RXREL (DIR16, 1, 16, 0, dont, FALSE),
56 RXREL (DIR16U, 1, 16, 0, unsigned, FALSE),
57 RXREL (DIR16S, 1, 16, 0, signed, FALSE),
58 RXREL (DIR8, 0, 8, 0, dont, FALSE),
59 RXREL (DIR8U, 0, 8, 0, unsigned, FALSE),
60 RXREL (DIR8S, 0, 8, 0, signed, FALSE),
61 RXREL (DIR24S_PCREL, 2, 24, 0, signed, TRUE),
62 RXREL (DIR16S_PCREL, 1, 16, 0, signed, TRUE),
63 RXREL (DIR8S_PCREL, 0, 8, 0, signed, TRUE),
64 RXREL (DIR16UL, 1, 16, 2, unsigned, FALSE),
65 RXREL (DIR16UW, 1, 16, 1, unsigned, FALSE),
66 RXREL (DIR8UL, 0, 8, 2, unsigned, FALSE),
67 RXREL (DIR8UW, 0, 8, 1, unsigned, FALSE),
 68   RXREL (DIR32_REV,     2, 32, 0, dont,     FALSE),
69 RXREL (DIR16_REV, 1, 16, 0, dont, FALSE),
70 RXREL (DIR3U_PCREL, 0, 3, 0, dont, TRUE),
71
72 EMPTY_HOWTO (0x13),
73 EMPTY_HOWTO (0x14),
74 EMPTY_HOWTO (0x15),
75 EMPTY_HOWTO (0x16),
76 EMPTY_HOWTO (0x17),
77 EMPTY_HOWTO (0x18),
78 EMPTY_HOWTO (0x19),
79 EMPTY_HOWTO (0x1a),
80 EMPTY_HOWTO (0x1b),
81 EMPTY_HOWTO (0x1c),
82 EMPTY_HOWTO (0x1d),
83 EMPTY_HOWTO (0x1e),
84 EMPTY_HOWTO (0x1f),
85
86 RXREL (RH_3_PCREL, 0, 3, 0, signed, TRUE),
87 RXREL (RH_16_OP, 1, 16, 0, signed, FALSE),
88 RXREL (RH_24_OP, 2, 24, 0, signed, FALSE),
89 RXREL (RH_32_OP, 2, 32, 0, signed, FALSE),
90 RXREL (RH_24_UNS, 2, 24, 0, unsigned, FALSE),
91 RXREL (RH_8_NEG, 0, 8, 0, signed, FALSE),
92 RXREL (RH_16_NEG, 1, 16, 0, signed, FALSE),
93 RXREL (RH_24_NEG, 2, 24, 0, signed, FALSE),
94 RXREL (RH_32_NEG, 2, 32, 0, signed, FALSE),
95 RXREL (RH_DIFF, 2, 32, 0, signed, FALSE),
96 RXREL (RH_GPRELB, 1, 16, 0, unsigned, FALSE),
97 RXREL (RH_GPRELW, 1, 16, 0, unsigned, FALSE),
98 RXREL (RH_GPRELL, 1, 16, 0, unsigned, FALSE),
99 RXREL (RH_RELAX, 0, 0, 0, dont, FALSE),
100
101 EMPTY_HOWTO (0x2e),
102 EMPTY_HOWTO (0x2f),
103 EMPTY_HOWTO (0x30),
104 EMPTY_HOWTO (0x31),
105 EMPTY_HOWTO (0x32),
106 EMPTY_HOWTO (0x33),
107 EMPTY_HOWTO (0x34),
108 EMPTY_HOWTO (0x35),
109 EMPTY_HOWTO (0x36),
110 EMPTY_HOWTO (0x37),
111 EMPTY_HOWTO (0x38),
112 EMPTY_HOWTO (0x39),
113 EMPTY_HOWTO (0x3a),
114 EMPTY_HOWTO (0x3b),
115 EMPTY_HOWTO (0x3c),
116 EMPTY_HOWTO (0x3d),
117 EMPTY_HOWTO (0x3e),
118 EMPTY_HOWTO (0x3f),
119 EMPTY_HOWTO (0x40),
120
121 RXREL (ABS32, 2, 32, 0, dont, FALSE),
122 RXREL (ABS24S, 2, 24, 0, signed, FALSE),
123 RXREL (ABS16, 1, 16, 0, dont, FALSE),
124 RXREL (ABS16U, 1, 16, 0, unsigned, FALSE),
125 RXREL (ABS16S, 1, 16, 0, signed, FALSE),
126 RXREL (ABS8, 0, 8, 0, dont, FALSE),
127 RXREL (ABS8U, 0, 8, 0, unsigned, FALSE),
128 RXREL (ABS8S, 0, 8, 0, signed, FALSE),
129 RXREL (ABS24S_PCREL, 2, 24, 0, signed, TRUE),
130 RXREL (ABS16S_PCREL, 1, 16, 0, signed, TRUE),
131 RXREL (ABS8S_PCREL, 0, 8, 0, signed, TRUE),
132 RXREL (ABS16UL, 1, 16, 0, unsigned, FALSE),
133 RXREL (ABS16UW, 1, 16, 0, unsigned, FALSE),
134 RXREL (ABS8UL, 0, 8, 0, unsigned, FALSE),
135 RXREL (ABS8UW, 0, 8, 0, unsigned, FALSE),
136 RXREL (ABS32_REV, 2, 32, 0, dont, FALSE),
137 RXREL (ABS16_REV, 1, 16, 0, dont, FALSE),
138
139 #define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)
140
141 EMPTY_HOWTO (0x52),
142 EMPTY_HOWTO (0x53),
143 EMPTY_HOWTO (0x54),
144 EMPTY_HOWTO (0x55),
145 EMPTY_HOWTO (0x56),
146 EMPTY_HOWTO (0x57),
147 EMPTY_HOWTO (0x58),
148 EMPTY_HOWTO (0x59),
149 EMPTY_HOWTO (0x5a),
150 EMPTY_HOWTO (0x5b),
151 EMPTY_HOWTO (0x5c),
152 EMPTY_HOWTO (0x5d),
153 EMPTY_HOWTO (0x5e),
154 EMPTY_HOWTO (0x5f),
155 EMPTY_HOWTO (0x60),
156 EMPTY_HOWTO (0x61),
157 EMPTY_HOWTO (0x62),
158 EMPTY_HOWTO (0x63),
159 EMPTY_HOWTO (0x64),
160 EMPTY_HOWTO (0x65),
161 EMPTY_HOWTO (0x66),
162 EMPTY_HOWTO (0x67),
163 EMPTY_HOWTO (0x68),
164 EMPTY_HOWTO (0x69),
165 EMPTY_HOWTO (0x6a),
166 EMPTY_HOWTO (0x6b),
167 EMPTY_HOWTO (0x6c),
168 EMPTY_HOWTO (0x6d),
169 EMPTY_HOWTO (0x6e),
170 EMPTY_HOWTO (0x6f),
171 EMPTY_HOWTO (0x70),
172 EMPTY_HOWTO (0x71),
173 EMPTY_HOWTO (0x72),
174 EMPTY_HOWTO (0x73),
175 EMPTY_HOWTO (0x74),
176 EMPTY_HOWTO (0x75),
177 EMPTY_HOWTO (0x76),
178 EMPTY_HOWTO (0x77),
179
180 /* These are internal. */
181 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12. */
182 /* ---- ---- 4--- 3210. */
183 #define R_RX_RH_ABS5p8B 0x78
184 RXREL (RH_ABS5p8B, 0, 0, 0, dont, FALSE),
185 #define R_RX_RH_ABS5p8W 0x79
186 RXREL (RH_ABS5p8W, 0, 0, 0, dont, FALSE),
187 #define R_RX_RH_ABS5p8L 0x7a
188 RXREL (RH_ABS5p8L, 0, 0, 0, dont, FALSE),
189 /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12. */
190 /* ---- -432 1--- 0---. */
191 #define R_RX_RH_ABS5p5B 0x7b
192 RXREL (RH_ABS5p5B, 0, 0, 0, dont, FALSE),
193 #define R_RX_RH_ABS5p5W 0x7c
194 RXREL (RH_ABS5p5W, 0, 0, 0, dont, FALSE),
195 #define R_RX_RH_ABS5p5L 0x7d
196 RXREL (RH_ABS5p5L, 0, 0, 0, dont, FALSE),
197 /* A 4-bit unsigned immediate at bit position 8. */
198 #define R_RX_RH_UIMM4p8 0x7e
199 RXREL (RH_UIMM4p8, 0, 0, 0, dont, FALSE),
200 /* A 4-bit negative unsigned immediate at bit position 8. */
201 #define R_RX_RH_UNEG4p8 0x7f
202 RXREL (RH_UNEG4p8, 0, 0, 0, dont, FALSE),
203 /* End of internal relocs. */
204
205 RXREL (SYM, 2, 32, 0, dont, FALSE),
206 RXREL (OPneg, 2, 32, 0, dont, FALSE),
207 RXREL (OPadd, 2, 32, 0, dont, FALSE),
208 RXREL (OPsub, 2, 32, 0, dont, FALSE),
209 RXREL (OPmul, 2, 32, 0, dont, FALSE),
210 RXREL (OPdiv, 2, 32, 0, dont, FALSE),
211 RXREL (OPshla, 2, 32, 0, dont, FALSE),
212 RXREL (OPshra, 2, 32, 0, dont, FALSE),
213 RXREL (OPsctsize, 2, 32, 0, dont, FALSE),
214 RXREL (OPscttop, 2, 32, 0, dont, FALSE),
215 RXREL (OPand, 2, 32, 0, dont, FALSE),
216 RXREL (OPor, 2, 32, 0, dont, FALSE),
217 RXREL (OPxor, 2, 32, 0, dont, FALSE),
218 RXREL (OPnot, 2, 32, 0, dont, FALSE),
219 RXREL (OPmod, 2, 32, 0, dont, FALSE),
220 RXREL (OPromtop, 2, 32, 0, dont, FALSE),
221 RXREL (OPramtop, 2, 32, 0, dont, FALSE)
222 };
223 \f
224 /* Map BFD reloc types to RX ELF reloc types. */
225
226 struct rx_reloc_map
227 {
228 bfd_reloc_code_real_type bfd_reloc_val;
229 unsigned int rx_reloc_val;
230 };
231
232 static const struct rx_reloc_map rx_reloc_map [] =
233 {
234 { BFD_RELOC_NONE, R_RX_NONE },
235 { BFD_RELOC_8, R_RX_DIR8S },
236 { BFD_RELOC_16, R_RX_DIR16S },
237 { BFD_RELOC_24, R_RX_DIR24S },
238 { BFD_RELOC_32, R_RX_DIR32 },
239 { BFD_RELOC_RX_16_OP, R_RX_DIR16 },
240 { BFD_RELOC_RX_DIR3U_PCREL, R_RX_DIR3U_PCREL },
241 { BFD_RELOC_8_PCREL, R_RX_DIR8S_PCREL },
242 { BFD_RELOC_16_PCREL, R_RX_DIR16S_PCREL },
243 { BFD_RELOC_24_PCREL, R_RX_DIR24S_PCREL },
244 { BFD_RELOC_RX_8U, R_RX_DIR8U },
245 { BFD_RELOC_RX_16U, R_RX_DIR16U },
246 { BFD_RELOC_RX_24U, R_RX_RH_24_UNS },
247 { BFD_RELOC_RX_NEG8, R_RX_RH_8_NEG },
248 { BFD_RELOC_RX_NEG16, R_RX_RH_16_NEG },
249 { BFD_RELOC_RX_NEG24, R_RX_RH_24_NEG },
250 { BFD_RELOC_RX_NEG32, R_RX_RH_32_NEG },
251 { BFD_RELOC_RX_DIFF, R_RX_RH_DIFF },
252 { BFD_RELOC_RX_GPRELB, R_RX_RH_GPRELB },
253 { BFD_RELOC_RX_GPRELW, R_RX_RH_GPRELW },
254 { BFD_RELOC_RX_GPRELL, R_RX_RH_GPRELL },
255 { BFD_RELOC_RX_RELAX, R_RX_RH_RELAX },
256 { BFD_RELOC_RX_SYM, R_RX_SYM },
257 { BFD_RELOC_RX_OP_SUBTRACT, R_RX_OPsub },
258 { BFD_RELOC_RX_OP_NEG, R_RX_OPneg },
259 { BFD_RELOC_RX_ABS8, R_RX_ABS8 },
260 { BFD_RELOC_RX_ABS16, R_RX_ABS16 },
261 { BFD_RELOC_RX_ABS16_REV, R_RX_ABS16_REV },
262 { BFD_RELOC_RX_ABS32, R_RX_ABS32 },
263 { BFD_RELOC_RX_ABS32_REV, R_RX_ABS32_REV },
264 { BFD_RELOC_RX_ABS16UL, R_RX_ABS16UL },
265 { BFD_RELOC_RX_ABS16UW, R_RX_ABS16UW },
266 { BFD_RELOC_RX_ABS16U, R_RX_ABS16U }
267 };
268
269 #define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
270
271 static reloc_howto_type *
272 rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
273 bfd_reloc_code_real_type code)
274 {
275 unsigned int i;
276
277 if (code == BFD_RELOC_RX_32_OP)
278 return rx_elf_howto_table + R_RX_DIR32;
279
280   for (i = ARRAY_SIZE (rx_reloc_map); i--;)
281 if (rx_reloc_map [i].bfd_reloc_val == code)
282 return rx_elf_howto_table + rx_reloc_map[i].rx_reloc_val;
283
284 return NULL;
285 }
286
287 static reloc_howto_type *
288 rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
289 {
290 unsigned int i;
291
292 for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
293 if (rx_elf_howto_table[i].name != NULL
294 && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
295 return rx_elf_howto_table + i;
296
297 return NULL;
298 }
299
300 /* Set the howto pointer for an RX ELF reloc. */
301
302 static void
303 rx_info_to_howto_rela (bfd * abfd ATTRIBUTE_UNUSED,
304 arelent * cache_ptr,
305 Elf_Internal_Rela * dst)
306 {
307 unsigned int r_type;
308
309 r_type = ELF32_R_TYPE (dst->r_info);
310 BFD_ASSERT (r_type < (unsigned int) R_RX_max);
311 cache_ptr->howto = rx_elf_howto_table + r_type;
312 }
313 \f
314 static bfd_vma
315 get_symbol_value (const char * name,
316 bfd_reloc_status_type * status,
317 struct bfd_link_info * info,
318 bfd * input_bfd,
319 asection * input_section,
320 int offset)
321 {
322 bfd_vma value = 0;
323 struct bfd_link_hash_entry * h;
324
325 h = bfd_link_hash_lookup (info->hash, name, FALSE, FALSE, TRUE);
326
327 if (h == NULL
328 || (h->type != bfd_link_hash_defined
329 && h->type != bfd_link_hash_defweak))
330 * status = info->callbacks->undefined_symbol
331 (info, name, input_bfd, input_section, offset, TRUE);
332 else
333 value = (h->u.def.value
334 + h->u.def.section->output_section->vma
335 + h->u.def.section->output_offset);
336
337 return value;
338 }
339
340 static bfd_vma
341 get_gp (bfd_reloc_status_type * status,
342 struct bfd_link_info * info,
343 bfd * abfd,
344 asection * sec,
345 int offset)
346 {
347 static bfd_boolean cached = FALSE;
348 static bfd_vma cached_value = 0;
349
350 if (!cached)
351 {
352 cached_value = get_symbol_value ("__gp", status, info, abfd, sec, offset);
353 cached = TRUE;
354 }
355 return cached_value;
356 }
357
358 static bfd_vma
359 get_romstart (bfd_reloc_status_type * status,
360 struct bfd_link_info * info,
361 bfd * abfd,
362 asection * sec,
363 int offset)
364 {
365 static bfd_boolean cached = FALSE;
366 static bfd_vma cached_value = 0;
367
368 if (!cached)
369 {
370 cached_value = get_symbol_value ("_start", status, info, abfd, sec, offset);
371 cached = TRUE;
372 }
373 return cached_value;
374 }
375
376 static bfd_vma
377 get_ramstart (bfd_reloc_status_type * status,
378 struct bfd_link_info * info,
379 bfd * abfd,
380 asection * sec,
381 int offset)
382 {
383 static bfd_boolean cached = FALSE;
384 static bfd_vma cached_value = 0;
385
386 if (!cached)
387 {
388 cached_value = get_symbol_value ("__datastart", status, info, abfd, sec, offset);
389 cached = TRUE;
390 }
391 return cached_value;
392 }
393
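/* Complex relocations are evaluated on this small value stack:
   R_RX_SYM pushes a symbol's address, the R_RX_OP* relocs pop their
   operands and push the result, and the final data reloc (for example
   R_RX_ABS16) pops the value and stores it.  So a difference A - B
   arrives as R_RX_SYM A, R_RX_SYM B, R_RX_OPsub, then the storing
   reloc.  Stack overflow or underflow is reported as a dangerous
   relocation.  */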
394 #define NUM_STACK_ENTRIES 16
395 static int32_t rx_stack [ NUM_STACK_ENTRIES ];
396 static unsigned int rx_stack_top;
397
398 #define RX_STACK_PUSH(val) \
399 do \
400 { \
401 if (rx_stack_top < NUM_STACK_ENTRIES) \
402 rx_stack [rx_stack_top ++] = (val); \
403 else \
404 r = bfd_reloc_dangerous; \
405 } \
406 while (0)
407
408 #define RX_STACK_POP(dest) \
409 do \
410 { \
411 if (rx_stack_top > 0) \
412 (dest) = rx_stack [-- rx_stack_top]; \
413 else \
414 (dest) = 0, r = bfd_reloc_dangerous; \
415 } \
416 while (0)
417
418 /* Relocate an RX ELF section.
419 There is some attempt to make this function usable for many architectures,
420 both USE_REL and USE_RELA ['twould be nice if such a critter existed],
421 if only to serve as a learning tool.
422
423 The RELOCATE_SECTION function is called by the new ELF backend linker
424 to handle the relocations for a section.
425
426 The relocs are always passed as Rela structures; if the section
427 actually uses Rel structures, the r_addend field will always be
428 zero.
429
430 This function is responsible for adjusting the section contents as
431 necessary, and (if using Rela relocs and generating a relocatable
432 output file) adjusting the reloc addend as necessary.
433
434 This function does not have to worry about setting the reloc
435 address or the reloc symbol index.
436
437 LOCAL_SYMS is a pointer to the swapped in local symbols.
438
439 LOCAL_SECTIONS is an array giving the section in the input file
440 corresponding to the st_shndx field of each local symbol.
441
442 The global hash table entry for the global symbols can be found
443 via elf_sym_hashes (input_bfd).
444
445 When generating relocatable output, this function must handle
446 STB_LOCAL/STT_SECTION symbols specially. The output symbol is
447 going to be the section symbol corresponding to the output
448 section, which means that the addend must be adjusted
449 accordingly. */
450
451 static bfd_boolean
452 rx_elf_relocate_section
453 (bfd * output_bfd,
454 struct bfd_link_info * info,
455 bfd * input_bfd,
456 asection * input_section,
457 bfd_byte * contents,
458 Elf_Internal_Rela * relocs,
459 Elf_Internal_Sym * local_syms,
460 asection ** local_sections)
461 {
462 Elf_Internal_Shdr * symtab_hdr;
463 struct elf_link_hash_entry ** sym_hashes;
464 Elf_Internal_Rela * rel;
465 Elf_Internal_Rela * relend;
466 bfd_boolean pid_mode;
467 bfd_boolean saw_subtract = FALSE;
468
469 if (elf_elfheader (output_bfd)->e_flags & E_FLAG_RX_PID)
470 pid_mode = TRUE;
471 else
472 pid_mode = FALSE;
473
474 symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
475 sym_hashes = elf_sym_hashes (input_bfd);
476 relend = relocs + input_section->reloc_count;
477 for (rel = relocs; rel < relend; rel ++)
478 {
479 reloc_howto_type * howto;
480 unsigned long r_symndx;
481 Elf_Internal_Sym * sym;
482 asection * sec;
483 struct elf_link_hash_entry * h;
484 bfd_vma relocation;
485 bfd_reloc_status_type r;
486 const char * name = NULL;
487 bfd_boolean unresolved_reloc = TRUE;
488 int r_type;
489
490 r_type = ELF32_R_TYPE (rel->r_info);
491 r_symndx = ELF32_R_SYM (rel->r_info);
492
493 howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
494 h = NULL;
495 sym = NULL;
496 sec = NULL;
497 relocation = 0;
498
499 if (rx_stack_top == 0)
500 saw_subtract = FALSE;
501
502 if (r_symndx < symtab_hdr->sh_info)
503 {
504 sym = local_syms + r_symndx;
505 sec = local_sections [r_symndx];
506 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);
507
508 name = bfd_elf_string_from_elf_section
509 (input_bfd, symtab_hdr->sh_link, sym->st_name);
510 name = (sym->st_name == 0) ? bfd_section_name (input_bfd, sec) : name;
511 }
512 else
513 {
514 bfd_boolean warned;
515
516 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
517 r_symndx, symtab_hdr, sym_hashes, h,
518 sec, relocation, unresolved_reloc,
519 warned);
520
521 name = h->root.root.string;
522 }
523
524 if (sec != NULL && discarded_section (sec))
525 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
526 rel, 1, relend, howto, 0, contents);
527
528 if (info->relocatable)
529 {
530 /* This is a relocatable link. We don't have to change
531 anything, unless the reloc is against a section symbol,
532 in which case we have to adjust according to where the
533 section symbol winds up in the output section. */
534 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
535 rel->r_addend += sec->output_offset;
536 continue;
537 }
538
539 if (h != NULL && h->root.type == bfd_link_hash_undefweak)
540 /* If the symbol is undefined and weak
541 then the relocation resolves to zero. */
542 relocation = 0;
543 else
544 {
545 if (howto->pc_relative)
546 {
547 relocation -= (input_section->output_section->vma
548 + input_section->output_offset
549 + rel->r_offset);
550 if (r_type != R_RX_RH_3_PCREL
551 && r_type != R_RX_DIR3U_PCREL)
552 relocation ++;
553 }
554
555 relocation += rel->r_addend;
556 }
557
558 r = bfd_reloc_ok;
559
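      /* Helpers for the per-reloc cases below: RANGE flags an overflow
	 unless the computed value lies in [A, B], ALIGN flags a value
	 with any of the low bits in M set (i.e. misaligned), and OP(I)
	 is the I'th byte of the field being patched.  */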
560 #define RANGE(a,b) if (a > (long) relocation || (long) relocation > b) r = bfd_reloc_overflow
561 #define ALIGN(m) if (relocation & m) r = bfd_reloc_other;
562 #define OP(i) (contents[rel->r_offset + (i)])
563 #define WARN_REDHAT(type) \
564 _bfd_error_handler (_("%B:%A: Warning: deprecated Red Hat reloc " type " detected against: %s."), \
565 input_bfd, input_section, name)
566
567 /* Check for unsafe relocs in PID mode. These are any relocs where
568 an absolute address is being computed. There are special cases
569 for relocs against symbols that are known to be referenced in
570 crt0.o before the PID base address register has been initialised. */
571 #define UNSAFE_FOR_PID \
572 do \
573 { \
574 if (pid_mode \
575 && sec != NULL \
576 && sec->flags & SEC_READONLY \
577 && !(input_section->flags & SEC_DEBUGGING) \
578 && strcmp (name, "__pid_base") != 0 \
579 && strcmp (name, "__gp") != 0 \
580 && strcmp (name, "__romdatastart") != 0 \
581 && !saw_subtract) \
582 _bfd_error_handler (_("%B(%A): unsafe PID relocation %s at 0x%08lx (against %s in %s)"), \
583 input_bfd, input_section, howto->name, \
584 input_section->output_section->vma + input_section->output_offset + rel->r_offset, \
585 name, sec->name); \
586 } \
587 while (0)
588
589 /* Opcode relocs are always big endian. Data relocs are bi-endian. */
590 switch (r_type)
591 {
592 case R_RX_NONE:
593 break;
594
595 case R_RX_RH_RELAX:
596 break;
597
598 case R_RX_RH_3_PCREL:
599 WARN_REDHAT ("RX_RH_3_PCREL");
600 RANGE (3, 10);
601 OP (0) &= 0xf8;
602 OP (0) |= relocation & 0x07;
603 break;
604
605 case R_RX_RH_8_NEG:
606 WARN_REDHAT ("RX_RH_8_NEG");
607 relocation = - relocation;
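	  /* Fall through - share the 8-bit range check and store below.  */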
608 case R_RX_DIR8S_PCREL:
609 UNSAFE_FOR_PID;
610 RANGE (-128, 127);
611 OP (0) = relocation;
612 break;
613
614 case R_RX_DIR8S:
615 UNSAFE_FOR_PID;
616 RANGE (-128, 255);
617 OP (0) = relocation;
618 break;
619
620 case R_RX_DIR8U:
621 UNSAFE_FOR_PID;
622 RANGE (0, 255);
623 OP (0) = relocation;
624 break;
625
626 case R_RX_RH_16_NEG:
627 WARN_REDHAT ("RX_RH_16_NEG");
628 relocation = - relocation;
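	  /* Fall through - share the 16-bit range check and store below.  */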
629 case R_RX_DIR16S_PCREL:
630 UNSAFE_FOR_PID;
631 RANGE (-32768, 32767);
632 #if RX_OPCODE_BIG_ENDIAN
	    OP (1) = relocation;
	    OP (0) = relocation >> 8;
633 #else
634	    OP (0) = relocation;
635	    OP (1) = relocation >> 8;
636 #endif
637 break;
638
639 case R_RX_RH_16_OP:
640 WARN_REDHAT ("RX_RH_16_OP");
641 UNSAFE_FOR_PID;
642 RANGE (-32768, 32767);
643 #if RX_OPCODE_BIG_ENDIAN
644 OP (1) = relocation;
645 OP (0) = relocation >> 8;
646 #else
647 OP (0) = relocation;
648 OP (1) = relocation >> 8;
649 #endif
650 break;
651
652 case R_RX_DIR16S:
653 UNSAFE_FOR_PID;
654 RANGE (-32768, 65535);
655 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
656 {
657 OP (1) = relocation;
658 OP (0) = relocation >> 8;
659 }
660 else
661 {
662 OP (0) = relocation;
663 OP (1) = relocation >> 8;
664 }
665 break;
666
667 case R_RX_DIR16U:
668 UNSAFE_FOR_PID;
669	  RANGE (0, 65535);
670 #if RX_OPCODE_BIG_ENDIAN
671 OP (1) = relocation;
672 OP (0) = relocation >> 8;
673 #else
674 OP (0) = relocation;
675 OP (1) = relocation >> 8;
676 #endif
677 break;
678
679 case R_RX_DIR16:
680 UNSAFE_FOR_PID;
681	  RANGE (-32768, 65535);
682 #if RX_OPCODE_BIG_ENDIAN
683 OP (1) = relocation;
684 OP (0) = relocation >> 8;
685 #else
686 OP (0) = relocation;
687 OP (1) = relocation >> 8;
688 #endif
689 break;
690
691 case R_RX_DIR16_REV:
692 UNSAFE_FOR_PID;
693	  RANGE (-32768, 65535);
694 #if RX_OPCODE_BIG_ENDIAN
695 OP (0) = relocation;
696 OP (1) = relocation >> 8;
697 #else
698 OP (1) = relocation;
699 OP (0) = relocation >> 8;
700 #endif
701 break;
702
703 case R_RX_DIR3U_PCREL:
704 RANGE (3, 10);
705 OP (0) &= 0xf8;
706 OP (0) |= relocation & 0x07;
707 break;
708
709 case R_RX_RH_24_NEG:
710 UNSAFE_FOR_PID;
711 WARN_REDHAT ("RX_RH_24_NEG");
712 relocation = - relocation;
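	  /* Fall through - share the 24-bit range check and store below.  */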
713 case R_RX_DIR24S_PCREL:
714 RANGE (-0x800000, 0x7fffff);
715 #if RX_OPCODE_BIG_ENDIAN
716 OP (2) = relocation;
717 OP (1) = relocation >> 8;
718 OP (0) = relocation >> 16;
719 #else
720 OP (0) = relocation;
721 OP (1) = relocation >> 8;
722 OP (2) = relocation >> 16;
723 #endif
724 break;
725
726 case R_RX_RH_24_OP:
727 UNSAFE_FOR_PID;
728 WARN_REDHAT ("RX_RH_24_OP");
729 RANGE (-0x800000, 0x7fffff);
730 #if RX_OPCODE_BIG_ENDIAN
731 OP (2) = relocation;
732 OP (1) = relocation >> 8;
733 OP (0) = relocation >> 16;
734 #else
735 OP (0) = relocation;
736 OP (1) = relocation >> 8;
737 OP (2) = relocation >> 16;
738 #endif
739 break;
740
741 case R_RX_DIR24S:
742 UNSAFE_FOR_PID;
743 RANGE (-0x800000, 0x7fffff);
744 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
745 {
746 OP (2) = relocation;
747 OP (1) = relocation >> 8;
748 OP (0) = relocation >> 16;
749 }
750 else
751 {
752 OP (0) = relocation;
753 OP (1) = relocation >> 8;
754 OP (2) = relocation >> 16;
755 }
756 break;
757
758 case R_RX_RH_24_UNS:
759 UNSAFE_FOR_PID;
760 WARN_REDHAT ("RX_RH_24_UNS");
761 RANGE (0, 0xffffff);
762 #if RX_OPCODE_BIG_ENDIAN
763 OP (2) = relocation;
764 OP (1) = relocation >> 8;
765 OP (0) = relocation >> 16;
766 #else
767 OP (0) = relocation;
768 OP (1) = relocation >> 8;
769 OP (2) = relocation >> 16;
770 #endif
771 break;
772
773 case R_RX_RH_32_NEG:
774 UNSAFE_FOR_PID;
775 WARN_REDHAT ("RX_RH_32_NEG");
776 relocation = - relocation;
777 #if RX_OPCODE_BIG_ENDIAN
778 OP (3) = relocation;
779 OP (2) = relocation >> 8;
780 OP (1) = relocation >> 16;
781 OP (0) = relocation >> 24;
782 #else
783 OP (0) = relocation;
784 OP (1) = relocation >> 8;
785 OP (2) = relocation >> 16;
786 OP (3) = relocation >> 24;
787 #endif
788 break;
789
790 case R_RX_RH_32_OP:
791 UNSAFE_FOR_PID;
792 WARN_REDHAT ("RX_RH_32_OP");
793 #if RX_OPCODE_BIG_ENDIAN
794 OP (3) = relocation;
795 OP (2) = relocation >> 8;
796 OP (1) = relocation >> 16;
797 OP (0) = relocation >> 24;
798 #else
799 OP (0) = relocation;
800 OP (1) = relocation >> 8;
801 OP (2) = relocation >> 16;
802 OP (3) = relocation >> 24;
803 #endif
804 break;
805
806 case R_RX_DIR32:
807 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
808 {
809 OP (3) = relocation;
810 OP (2) = relocation >> 8;
811 OP (1) = relocation >> 16;
812 OP (0) = relocation >> 24;
813 }
814 else
815 {
816 OP (0) = relocation;
817 OP (1) = relocation >> 8;
818 OP (2) = relocation >> 16;
819 OP (3) = relocation >> 24;
820 }
821 break;
822
823 case R_RX_DIR32_REV:
824 if (BIGE (output_bfd))
825 {
826 OP (0) = relocation;
827 OP (1) = relocation >> 8;
828 OP (2) = relocation >> 16;
829 OP (3) = relocation >> 24;
830 }
831 else
832 {
833 OP (3) = relocation;
834 OP (2) = relocation >> 8;
835 OP (1) = relocation >> 16;
836 OP (0) = relocation >> 24;
837 }
838 break;
839
840 case R_RX_RH_DIFF:
841 {
842 bfd_vma val;
843 WARN_REDHAT ("RX_RH_DIFF");
844 val = bfd_get_32 (output_bfd, & OP (0));
845 val -= relocation;
846 bfd_put_32 (output_bfd, val, & OP (0));
847 }
848 break;
849
850 case R_RX_RH_GPRELB:
851 WARN_REDHAT ("RX_RH_GPRELB");
852 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
853 RANGE (0, 65535);
854 #if RX_OPCODE_BIG_ENDIAN
855 OP (1) = relocation;
856 OP (0) = relocation >> 8;
857 #else
858 OP (0) = relocation;
859 OP (1) = relocation >> 8;
860 #endif
861 break;
862
863 case R_RX_RH_GPRELW:
864 WARN_REDHAT ("RX_RH_GPRELW");
865 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
866 ALIGN (1);
867 relocation >>= 1;
868 RANGE (0, 65535);
869 #if RX_OPCODE_BIG_ENDIAN
870 OP (1) = relocation;
871 OP (0) = relocation >> 8;
872 #else
873 OP (0) = relocation;
874 OP (1) = relocation >> 8;
875 #endif
876 break;
877
878 case R_RX_RH_GPRELL:
879 WARN_REDHAT ("RX_RH_GPRELL");
880 relocation -= get_gp (&r, info, input_bfd, input_section, rel->r_offset);
881 ALIGN (3);
882 relocation >>= 2;
883 RANGE (0, 65535);
884 #if RX_OPCODE_BIG_ENDIAN
885 OP (1) = relocation;
886 OP (0) = relocation >> 8;
887 #else
888 OP (0) = relocation;
889 OP (1) = relocation >> 8;
890 #endif
891 break;
892
893 /* Internal relocations just for relaxation: */
894 case R_RX_RH_ABS5p5B:
895 RX_STACK_POP (relocation);
896 RANGE (0, 31);
897 OP (0) &= 0xf8;
898 OP (0) |= relocation >> 2;
899 OP (1) &= 0x77;
900 OP (1) |= (relocation << 6) & 0x80;
901 OP (1) |= (relocation << 3) & 0x08;
902 break;
903
904 case R_RX_RH_ABS5p5W:
905 RX_STACK_POP (relocation);
906 RANGE (0, 62);
907 ALIGN (1);
908 relocation >>= 1;
909 OP (0) &= 0xf8;
910 OP (0) |= relocation >> 2;
911 OP (1) &= 0x77;
912 OP (1) |= (relocation << 6) & 0x80;
913 OP (1) |= (relocation << 3) & 0x08;
914 break;
915
916 case R_RX_RH_ABS5p5L:
917 RX_STACK_POP (relocation);
918 RANGE (0, 124);
919 ALIGN (3);
920 relocation >>= 2;
921 OP (0) &= 0xf8;
922 OP (0) |= relocation >> 2;
923 OP (1) &= 0x77;
924 OP (1) |= (relocation << 6) & 0x80;
925 OP (1) |= (relocation << 3) & 0x08;
926 break;
927
928 case R_RX_RH_ABS5p8B:
929 RX_STACK_POP (relocation);
930 RANGE (0, 31);
931 OP (0) &= 0x70;
932 OP (0) |= (relocation << 3) & 0x80;
933 OP (0) |= relocation & 0x0f;
934 break;
935
936 case R_RX_RH_ABS5p8W:
937 RX_STACK_POP (relocation);
938 RANGE (0, 62);
939 ALIGN (1);
940 relocation >>= 1;
941 OP (0) &= 0x70;
942 OP (0) |= (relocation << 3) & 0x80;
943 OP (0) |= relocation & 0x0f;
944 break;
945
946 case R_RX_RH_ABS5p8L:
947 RX_STACK_POP (relocation);
948 RANGE (0, 124);
949 ALIGN (3);
950 relocation >>= 2;
951 OP (0) &= 0x70;
952 OP (0) |= (relocation << 3) & 0x80;
953 OP (0) |= relocation & 0x0f;
954 break;
955
956 case R_RX_RH_UIMM4p8:
957 RANGE (0, 15);
958 OP (0) &= 0x0f;
959 OP (0) |= relocation << 4;
960 break;
961
962 case R_RX_RH_UNEG4p8:
963 RANGE (-15, 0);
964 OP (0) &= 0x0f;
965 OP (0) |= (-relocation) << 4;
966 break;
967
968 /* Complex reloc handling: */
969
970 case R_RX_ABS32:
971 UNSAFE_FOR_PID;
972 RX_STACK_POP (relocation);
973 #if RX_OPCODE_BIG_ENDIAN
974 OP (3) = relocation;
975 OP (2) = relocation >> 8;
976 OP (1) = relocation >> 16;
977 OP (0) = relocation >> 24;
978 #else
979 OP (0) = relocation;
980 OP (1) = relocation >> 8;
981 OP (2) = relocation >> 16;
982 OP (3) = relocation >> 24;
983 #endif
984 break;
985
986 case R_RX_ABS32_REV:
987 UNSAFE_FOR_PID;
988 RX_STACK_POP (relocation);
989 #if RX_OPCODE_BIG_ENDIAN
990 OP (0) = relocation;
991 OP (1) = relocation >> 8;
992 OP (2) = relocation >> 16;
993 OP (3) = relocation >> 24;
994 #else
995 OP (3) = relocation;
996 OP (2) = relocation >> 8;
997 OP (1) = relocation >> 16;
998 OP (0) = relocation >> 24;
999 #endif
1000 break;
1001
1002 case R_RX_ABS24S_PCREL:
1003 case R_RX_ABS24S:
1004 UNSAFE_FOR_PID;
1005 RX_STACK_POP (relocation);
1006 RANGE (-0x800000, 0x7fffff);
1007 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
1008 {
1009 OP (2) = relocation;
1010 OP (1) = relocation >> 8;
1011 OP (0) = relocation >> 16;
1012 }
1013 else
1014 {
1015 OP (0) = relocation;
1016 OP (1) = relocation >> 8;
1017 OP (2) = relocation >> 16;
1018 }
1019 break;
1020
1021 case R_RX_ABS16:
1022 UNSAFE_FOR_PID;
1023 RX_STACK_POP (relocation);
1024 RANGE (-32768, 65535);
1025 #if RX_OPCODE_BIG_ENDIAN
1026 OP (1) = relocation;
1027 OP (0) = relocation >> 8;
1028 #else
1029 OP (0) = relocation;
1030 OP (1) = relocation >> 8;
1031 #endif
1032 break;
1033
1034 case R_RX_ABS16_REV:
1035 UNSAFE_FOR_PID;
1036 RX_STACK_POP (relocation);
1037 RANGE (-32768, 65535);
1038 #if RX_OPCODE_BIG_ENDIAN
1039 OP (0) = relocation;
1040 OP (1) = relocation >> 8;
1041 #else
1042 OP (1) = relocation;
1043 OP (0) = relocation >> 8;
1044 #endif
1045 break;
1046
1047 case R_RX_ABS16S_PCREL:
1048 case R_RX_ABS16S:
1049 RX_STACK_POP (relocation);
1050 RANGE (-32768, 32767);
1051 if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
1052 {
1053 OP (1) = relocation;
1054 OP (0) = relocation >> 8;
1055 }
1056 else
1057 {
1058 OP (0) = relocation;
1059 OP (1) = relocation >> 8;
1060 }
1061 break;
1062
1063 case R_RX_ABS16U:
1064 UNSAFE_FOR_PID;
1065 RX_STACK_POP (relocation);
1066	  RANGE (0, 65535);
1067 #if RX_OPCODE_BIG_ENDIAN
1068 OP (1) = relocation;
1069 OP (0) = relocation >> 8;
1070 #else
1071 OP (0) = relocation;
1072 OP (1) = relocation >> 8;
1073 #endif
1074 break;
1075
1076 case R_RX_ABS16UL:
1077 UNSAFE_FOR_PID;
1078 RX_STACK_POP (relocation);
1079 relocation >>= 2;
1080	  RANGE (0, 65535);
1081 #if RX_OPCODE_BIG_ENDIAN
1082 OP (1) = relocation;
1083 OP (0) = relocation >> 8;
1084 #else
1085 OP (0) = relocation;
1086 OP (1) = relocation >> 8;
1087 #endif
1088 break;
1089
1090 case R_RX_ABS16UW:
1091 UNSAFE_FOR_PID;
1092 RX_STACK_POP (relocation);
1093 relocation >>= 1;
1094	  RANGE (0, 65535);
1095 #if RX_OPCODE_BIG_ENDIAN
1096 OP (1) = relocation;
1097 OP (0) = relocation >> 8;
1098 #else
1099 OP (0) = relocation;
1100 OP (1) = relocation >> 8;
1101 #endif
1102 break;
1103
1104 case R_RX_ABS8:
1105 UNSAFE_FOR_PID;
1106 RX_STACK_POP (relocation);
1107 RANGE (-128, 255);
1108 OP (0) = relocation;
1109 break;
1110
1111 case R_RX_ABS8U:
1112 UNSAFE_FOR_PID;
1113 RX_STACK_POP (relocation);
1114 RANGE (0, 255);
1115 OP (0) = relocation;
1116 break;
1117
1118 case R_RX_ABS8UL:
1119 UNSAFE_FOR_PID;
1120 RX_STACK_POP (relocation);
1121 relocation >>= 2;
1122 RANGE (0, 255);
1123 OP (0) = relocation;
1124 break;
1125
1126 case R_RX_ABS8UW:
1127 UNSAFE_FOR_PID;
1128 RX_STACK_POP (relocation);
1129 relocation >>= 1;
1130 RANGE (0, 255);
1131 OP (0) = relocation;
1132 break;
1133
1134 case R_RX_ABS8S:
1135 UNSAFE_FOR_PID;
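	  /* Fall through - ABS8S shares the pop, range check and store
	     done for ABS8S_PCREL.  */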
1136 case R_RX_ABS8S_PCREL:
1137 RX_STACK_POP (relocation);
1138 RANGE (-128, 127);
1139 OP (0) = relocation;
1140 break;
1141
1142 case R_RX_SYM:
1143 if (r_symndx < symtab_hdr->sh_info)
1144 RX_STACK_PUSH (sec->output_section->vma
1145 + sec->output_offset
1146 + sym->st_value
1147 + rel->r_addend);
1148 else
1149 {
1150 if (h != NULL
1151 && (h->root.type == bfd_link_hash_defined
1152 || h->root.type == bfd_link_hash_defweak))
1153 RX_STACK_PUSH (h->root.u.def.value
1154 + sec->output_section->vma
1155 + sec->output_offset
1156 + rel->r_addend);
1157 else
1158 _bfd_error_handler (_("Warning: RX_SYM reloc with an unknown symbol"));
1159 }
1160 break;
1161
1162 case R_RX_OPneg:
1163 {
1164 int32_t tmp;
1165
1166 saw_subtract = TRUE;
1167 RX_STACK_POP (tmp);
1168 tmp = - tmp;
1169 RX_STACK_PUSH (tmp);
1170 }
1171 break;
1172
1173 case R_RX_OPadd:
1174 {
1175 int32_t tmp1, tmp2;
1176
1177 RX_STACK_POP (tmp1);
1178 RX_STACK_POP (tmp2);
1179 tmp1 += tmp2;
1180 RX_STACK_PUSH (tmp1);
1181 }
1182 break;
1183
1184 case R_RX_OPsub:
1185 {
1186 int32_t tmp1, tmp2;
1187
1188 saw_subtract = TRUE;
1189 RX_STACK_POP (tmp1);
1190 RX_STACK_POP (tmp2);
1191 tmp2 -= tmp1;
1192 RX_STACK_PUSH (tmp2);
1193 }
1194 break;
1195
1196 case R_RX_OPmul:
1197 {
1198 int32_t tmp1, tmp2;
1199
1200 RX_STACK_POP (tmp1);
1201 RX_STACK_POP (tmp2);
1202 tmp1 *= tmp2;
1203 RX_STACK_PUSH (tmp1);
1204 }
1205 break;
1206
1207 case R_RX_OPdiv:
1208 {
1209 int32_t tmp1, tmp2;
1210
1211 RX_STACK_POP (tmp1);
1212 RX_STACK_POP (tmp2);
1213 tmp1 /= tmp2;
1214 RX_STACK_PUSH (tmp1);
1215 }
1216 break;
1217
1218 case R_RX_OPshla:
1219 {
1220 int32_t tmp1, tmp2;
1221
1222 RX_STACK_POP (tmp1);
1223 RX_STACK_POP (tmp2);
1224 tmp1 <<= tmp2;
1225 RX_STACK_PUSH (tmp1);
1226 }
1227 break;
1228
1229 case R_RX_OPshra:
1230 {
1231 int32_t tmp1, tmp2;
1232
1233 RX_STACK_POP (tmp1);
1234 RX_STACK_POP (tmp2);
1235 tmp1 >>= tmp2;
1236 RX_STACK_PUSH (tmp1);
1237 }
1238 break;
1239
1240 case R_RX_OPsctsize:
1241 RX_STACK_PUSH (input_section->size);
1242 break;
1243
1244 case R_RX_OPscttop:
1245 RX_STACK_PUSH (input_section->output_section->vma);
1246 break;
1247
1248 case R_RX_OPand:
1249 {
1250 int32_t tmp1, tmp2;
1251
1252 RX_STACK_POP (tmp1);
1253 RX_STACK_POP (tmp2);
1254 tmp1 &= tmp2;
1255 RX_STACK_PUSH (tmp1);
1256 }
1257 break;
1258
1259 case R_RX_OPor:
1260 {
1261 int32_t tmp1, tmp2;
1262
1263 RX_STACK_POP (tmp1);
1264 RX_STACK_POP (tmp2);
1265 tmp1 |= tmp2;
1266 RX_STACK_PUSH (tmp1);
1267 }
1268 break;
1269
1270 case R_RX_OPxor:
1271 {
1272 int32_t tmp1, tmp2;
1273
1274 RX_STACK_POP (tmp1);
1275 RX_STACK_POP (tmp2);
1276 tmp1 ^= tmp2;
1277 RX_STACK_PUSH (tmp1);
1278 }
1279 break;
1280
1281 case R_RX_OPnot:
1282 {
1283 int32_t tmp;
1284
1285 RX_STACK_POP (tmp);
1286 tmp = ~ tmp;
1287 RX_STACK_PUSH (tmp);
1288 }
1289 break;
1290
1291 case R_RX_OPmod:
1292 {
1293 int32_t tmp1, tmp2;
1294
1295 RX_STACK_POP (tmp1);
1296 RX_STACK_POP (tmp2);
1297 tmp1 %= tmp2;
1298 RX_STACK_PUSH (tmp1);
1299 }
1300 break;
1301
1302 case R_RX_OPromtop:
1303 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1304 break;
1305
1306 case R_RX_OPramtop:
1307 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1308 break;
1309
1310 default:
1311 r = bfd_reloc_notsupported;
1312 break;
1313 }
1314
1315 if (r != bfd_reloc_ok)
1316 {
1317 const char * msg = NULL;
1318
1319 switch (r)
1320 {
1321 case bfd_reloc_overflow:
1322 /* Catch the case of a missing function declaration
1323 and emit a more helpful error message. */
1324 if (r_type == R_RX_DIR24S_PCREL)
1325 msg = _("%B(%A): error: call to undefined function '%s'");
1326 else
1327 r = info->callbacks->reloc_overflow
1328 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
1329 input_bfd, input_section, rel->r_offset);
1330 break;
1331
1332 case bfd_reloc_undefined:
1333 r = info->callbacks->undefined_symbol
1334 (info, name, input_bfd, input_section, rel->r_offset,
1335 TRUE);
1336 break;
1337
1338 case bfd_reloc_other:
1339 msg = _("%B(%A): warning: unaligned access to symbol '%s' in the small data area");
1340 break;
1341
1342 case bfd_reloc_outofrange:
1343 msg = _("%B(%A): internal error: out of range error");
1344 break;
1345
1346 case bfd_reloc_notsupported:
1347 msg = _("%B(%A): internal error: unsupported relocation error");
1348 break;
1349
1350 case bfd_reloc_dangerous:
1351 msg = _("%B(%A): internal error: dangerous relocation");
1352 break;
1353
1354 default:
1355 msg = _("%B(%A): internal error: unknown error");
1356 break;
1357 }
1358
1359 if (msg)
1360 _bfd_error_handler (msg, input_bfd, input_section, name);
1361
1362 if (! r)
1363 return FALSE;
1364 }
1365 }
1366
1367 return TRUE;
1368 }
1369 \f
1370 /* Relaxation Support. */
1371
1372 /* Progression of relocations from largest operand size to smallest
1373 operand size. */
1374
1375 static int
1376 next_smaller_reloc (int r)
1377 {
1378 switch (r)
1379 {
1380 case R_RX_DIR32: return R_RX_DIR24S;
1381 case R_RX_DIR24S: return R_RX_DIR16S;
1382 case R_RX_DIR16S: return R_RX_DIR8S;
1383 case R_RX_DIR8S: return R_RX_NONE;
1384
1385 case R_RX_DIR16: return R_RX_DIR8;
1386 case R_RX_DIR8: return R_RX_NONE;
1387
1388 case R_RX_DIR16U: return R_RX_DIR8U;
1389 case R_RX_DIR8U: return R_RX_NONE;
1390
1391 case R_RX_DIR24S_PCREL: return R_RX_DIR16S_PCREL;
1392 case R_RX_DIR16S_PCREL: return R_RX_DIR8S_PCREL;
1393 case R_RX_DIR8S_PCREL: return R_RX_DIR3U_PCREL;
1394
1395 case R_RX_DIR16UL: return R_RX_DIR8UL;
1396 case R_RX_DIR8UL: return R_RX_NONE;
1397 case R_RX_DIR16UW: return R_RX_DIR8UW;
1398 case R_RX_DIR8UW: return R_RX_NONE;
1399
1400 case R_RX_RH_32_OP: return R_RX_RH_24_OP;
1401 case R_RX_RH_24_OP: return R_RX_RH_16_OP;
1402 case R_RX_RH_16_OP: return R_RX_DIR8;
1403
1404 case R_RX_ABS32: return R_RX_ABS24S;
1405 case R_RX_ABS24S: return R_RX_ABS16S;
1406 case R_RX_ABS16: return R_RX_ABS8;
1407 case R_RX_ABS16U: return R_RX_ABS8U;
1408 case R_RX_ABS16S: return R_RX_ABS8S;
1409 case R_RX_ABS8: return R_RX_NONE;
1410 case R_RX_ABS8U: return R_RX_NONE;
1411 case R_RX_ABS8S: return R_RX_NONE;
1412 case R_RX_ABS24S_PCREL: return R_RX_ABS16S_PCREL;
1413 case R_RX_ABS16S_PCREL: return R_RX_ABS8S_PCREL;
1414 case R_RX_ABS8S_PCREL: return R_RX_NONE;
1415 case R_RX_ABS16UL: return R_RX_ABS8UL;
1416 case R_RX_ABS16UW: return R_RX_ABS8UW;
1417 case R_RX_ABS8UL: return R_RX_NONE;
1418 case R_RX_ABS8UW: return R_RX_NONE;
1419 }
1420 return r;
1421 }
1422
1423 /* Delete some bytes from a section while relaxing. */
1424
1425 static bfd_boolean
1426 elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
1427 Elf_Internal_Rela *alignment_rel, int force_snip)
1428 {
1429 Elf_Internal_Shdr * symtab_hdr;
1430 unsigned int sec_shndx;
1431 bfd_byte * contents;
1432 Elf_Internal_Rela * irel;
1433 Elf_Internal_Rela * irelend;
1434 Elf_Internal_Sym * isym;
1435 Elf_Internal_Sym * isymend;
1436 bfd_vma toaddr;
1437 unsigned int symcount;
1438 struct elf_link_hash_entry ** sym_hashes;
1439 struct elf_link_hash_entry ** end_hashes;
1440
1441 if (!alignment_rel)
1442 force_snip = 1;
1443
1444 sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
1445
1446 contents = elf_section_data (sec)->this_hdr.contents;
1447
1448 /* The deletion must stop at the next alignment boundary, if
1449 ALIGNMENT_REL is non-NULL. */
1450 toaddr = sec->size;
1451 if (alignment_rel)
1452 toaddr = alignment_rel->r_offset;
1453
1454 irel = elf_section_data (sec)->relocs;
1455 irelend = irel + sec->reloc_count;
1456
1457 /* Actually delete the bytes. */
1458 memmove (contents + addr, contents + addr + count,
1459 (size_t) (toaddr - addr - count));
1460
1461 /* If we don't have an alignment marker to worry about, we can just
1462 shrink the section. Otherwise, we have to fill in the newly
1463 created gap with NOP insns (0x03). */
1464 if (force_snip)
1465 sec->size -= count;
1466 else
1467 memset (contents + toaddr - count, 0x03, count);
1468
1469 /* Adjust all the relocs. */
1470 for (irel = elf_section_data (sec)->relocs; irel < irelend; irel++)
1471 {
1472 /* Get the new reloc address. */
1473 if (irel->r_offset > addr
1474 && (irel->r_offset < toaddr
1475 || (force_snip && irel->r_offset == toaddr)))
1476 irel->r_offset -= count;
1477
1478 /* If we see an ALIGN marker at the end of the gap, we move it
1479 to the beginning of the gap, since marking these gaps is what
1480 they're for. */
1481 if (irel->r_offset == toaddr
1482 && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1483 && irel->r_addend & RX_RELAXA_ALIGN)
1484 irel->r_offset -= count;
1485 }
1486
1487 /* Adjust the local symbols defined in this section. */
1488 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1489 isym = (Elf_Internal_Sym *) symtab_hdr->contents;
1490 isymend = isym + symtab_hdr->sh_info;
1491
1492 for (; isym < isymend; isym++)
1493 {
1494 /* If the symbol is in the range of memory we just moved, we
1495 have to adjust its value. */
1496 if (isym->st_shndx == sec_shndx
1497 && isym->st_value > addr
1498 && isym->st_value < toaddr)
1499 isym->st_value -= count;
1500
1501      /* If the symbol *spans* the bytes we just deleted (i.e. its
1502	 *end* is in the moved bytes but its *start* isn't), then we
1503 must adjust its size. */
1504 if (isym->st_shndx == sec_shndx
1505 && isym->st_value < addr
1506 && isym->st_value + isym->st_size > addr
1507 && isym->st_value + isym->st_size < toaddr)
1508 isym->st_size -= count;
1509 }
1510
1511 /* Now adjust the global symbols defined in this section. */
1512 symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
1513 - symtab_hdr->sh_info);
1514 sym_hashes = elf_sym_hashes (abfd);
1515 end_hashes = sym_hashes + symcount;
1516
1517 for (; sym_hashes < end_hashes; sym_hashes++)
1518 {
1519 struct elf_link_hash_entry *sym_hash = *sym_hashes;
1520
1521 if ((sym_hash->root.type == bfd_link_hash_defined
1522 || sym_hash->root.type == bfd_link_hash_defweak)
1523 && sym_hash->root.u.def.section == sec)
1524 {
1525 /* As above, adjust the value if needed. */
1526 if (sym_hash->root.u.def.value > addr
1527 && sym_hash->root.u.def.value < toaddr)
1528 sym_hash->root.u.def.value -= count;
1529
1530 /* As above, adjust the size if needed. */
1531 if (sym_hash->root.u.def.value < addr
1532 && sym_hash->root.u.def.value + sym_hash->size > addr
1533 && sym_hash->root.u.def.value + sym_hash->size < toaddr)
1534 sym_hash->size -= count;
1535 }
1536 }
1537
1538 return TRUE;
1539 }
1540
1541 /* Used to sort relocs by address. If relocs have the same address,
1542 we maintain their relative order, except that R_RX_RH_RELAX
1543 alignment relocs must be the first reloc for any given address. */
1544
1545 static void
1546 reloc_bubblesort (Elf_Internal_Rela * r, int count)
1547 {
1548 int i;
1549 bfd_boolean again;
1550 bfd_boolean swappit;
1551
1552 /* This is almost a classic bubblesort. It's the slowest sort, but
1553 we're taking advantage of the fact that the relocations are
1554 mostly in order already (the assembler emits them that way) and
1555 we need relocs with the same address to remain in the same
1556 relative order. */
1557 again = TRUE;
1558 while (again)
1559 {
1560 again = FALSE;
1561 for (i = 0; i < count - 1; i ++)
1562 {
1563 if (r[i].r_offset > r[i + 1].r_offset)
1564 swappit = TRUE;
1565 else if (r[i].r_offset < r[i + 1].r_offset)
1566 swappit = FALSE;
1567 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1568 && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
1569 swappit = TRUE;
1570 else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
1571 && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
1572 && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
1573 && (r[i].r_addend & RX_RELAXA_ALIGN)))
1574 swappit = TRUE;
1575 else
1576 swappit = FALSE;
1577
1578 if (swappit)
1579 {
1580 Elf_Internal_Rela tmp;
1581
1582 tmp = r[i];
1583 r[i] = r[i + 1];
1584 r[i + 1] = tmp;
1585 /* If we do move a reloc back, re-scan to see if it
1586 needs to be moved even further back. This avoids
1587 most of the O(n^2) behavior for our cases. */
1588 if (i > 0)
1589 i -= 2;
1590 again = TRUE;
1591 }
1592 }
1593 }
1594 }
1595
1596
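/* OFFSET_FOR_RELOC resolves the operand reloc(s) that follow REL,
   consuming any complex-reloc prefix (R_RX_SYM / R_RX_OP*) that feeds
   the internal stack.  On return *LREL points at the last reloc
   consumed and *SCALE is the operand scaling (1, 2 or 4) implied by
   the final reloc type.  */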
1597 #define OFFSET_FOR_RELOC(rel, lrel, scale) \
1598 rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
1599 lrel, abfd, sec, link_info, scale)
1600
1601 static bfd_vma
1602 rx_offset_for_reloc (bfd * abfd,
1603 Elf_Internal_Rela * rel,
1604 Elf_Internal_Shdr * symtab_hdr,
1605 Elf_External_Sym_Shndx * shndx_buf ATTRIBUTE_UNUSED,
1606 Elf_Internal_Sym * intsyms,
1607 Elf_Internal_Rela ** lrel,
1608 bfd * input_bfd,
1609 asection * input_section,
1610 struct bfd_link_info * info,
1611 int * scale)
1612 {
1613 bfd_vma symval;
1614 bfd_reloc_status_type r;
1615
1616 *scale = 1;
1617
1618 /* REL is the first of 1..N relocations. We compute the symbol
1619 value for each relocation, then combine them if needed. LREL
1620 gets a pointer to the last relocation used. */
1621 while (1)
1622 {
1623 int32_t tmp1, tmp2;
1624
1625 /* Get the value of the symbol referred to by the reloc. */
1626 if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
1627 {
1628 /* A local symbol. */
1629 Elf_Internal_Sym *isym;
1630 asection *ssec;
1631
1632 isym = intsyms + ELF32_R_SYM (rel->r_info);
1633
1634 if (isym->st_shndx == SHN_UNDEF)
1635 ssec = bfd_und_section_ptr;
1636 else if (isym->st_shndx == SHN_ABS)
1637 ssec = bfd_abs_section_ptr;
1638 else if (isym->st_shndx == SHN_COMMON)
1639 ssec = bfd_com_section_ptr;
1640 else
1641 ssec = bfd_section_from_elf_index (abfd,
1642 isym->st_shndx);
1643
1644 /* Initial symbol value. */
1645 symval = isym->st_value;
1646
1647 /* GAS may have made this symbol relative to a section, in
1648 which case, we have to add the addend to find the
1649 symbol. */
1650 if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
1651 symval += rel->r_addend;
1652
1653 if (ssec)
1654 {
1655 if ((ssec->flags & SEC_MERGE)
1656 && ssec->sec_info_type == SEC_INFO_TYPE_MERGE)
1657 symval = _bfd_merged_section_offset (abfd, & ssec,
1658 elf_section_data (ssec)->sec_info,
1659 symval);
1660 }
1661
1662 /* Now make the offset relative to where the linker is putting it. */
1663 if (ssec)
1664 symval +=
1665 ssec->output_section->vma + ssec->output_offset;
1666
1667 symval += rel->r_addend;
1668 }
1669 else
1670 {
1671 unsigned long indx;
1672 struct elf_link_hash_entry * h;
1673
1674 /* An external symbol. */
1675 indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
1676 h = elf_sym_hashes (abfd)[indx];
1677 BFD_ASSERT (h != NULL);
1678
1679 if (h->root.type != bfd_link_hash_defined
1680 && h->root.type != bfd_link_hash_defweak)
1681 {
1682 /* This appears to be a reference to an undefined
1683 symbol. Just ignore it--it will be caught by the
1684 regular reloc processing. */
1685 if (lrel)
1686 *lrel = rel;
1687 return 0;
1688 }
1689
1690 symval = (h->root.u.def.value
1691 + h->root.u.def.section->output_section->vma
1692 + h->root.u.def.section->output_offset);
1693
1694 symval += rel->r_addend;
1695 }
1696
1697 switch (ELF32_R_TYPE (rel->r_info))
1698 {
1699 case R_RX_SYM:
1700 RX_STACK_PUSH (symval);
1701 break;
1702
1703 case R_RX_OPneg:
1704 RX_STACK_POP (tmp1);
1705 tmp1 = - tmp1;
1706 RX_STACK_PUSH (tmp1);
1707 break;
1708
1709 case R_RX_OPadd:
1710 RX_STACK_POP (tmp1);
1711 RX_STACK_POP (tmp2);
1712 tmp1 += tmp2;
1713 RX_STACK_PUSH (tmp1);
1714 break;
1715
1716 case R_RX_OPsub:
1717 RX_STACK_POP (tmp1);
1718 RX_STACK_POP (tmp2);
1719 tmp2 -= tmp1;
1720 RX_STACK_PUSH (tmp2);
1721 break;
1722
1723 case R_RX_OPmul:
1724 RX_STACK_POP (tmp1);
1725 RX_STACK_POP (tmp2);
1726 tmp1 *= tmp2;
1727 RX_STACK_PUSH (tmp1);
1728 break;
1729
1730 case R_RX_OPdiv:
1731 RX_STACK_POP (tmp1);
1732 RX_STACK_POP (tmp2);
1733 tmp1 /= tmp2;
1734 RX_STACK_PUSH (tmp1);
1735 break;
1736
1737 case R_RX_OPshla:
1738 RX_STACK_POP (tmp1);
1739 RX_STACK_POP (tmp2);
1740 tmp1 <<= tmp2;
1741 RX_STACK_PUSH (tmp1);
1742 break;
1743
1744 case R_RX_OPshra:
1745 RX_STACK_POP (tmp1);
1746 RX_STACK_POP (tmp2);
1747 tmp1 >>= tmp2;
1748 RX_STACK_PUSH (tmp1);
1749 break;
1750
1751 case R_RX_OPsctsize:
1752 RX_STACK_PUSH (input_section->size);
1753 break;
1754
1755 case R_RX_OPscttop:
1756 RX_STACK_PUSH (input_section->output_section->vma);
1757 break;
1758
1759 case R_RX_OPand:
1760 RX_STACK_POP (tmp1);
1761 RX_STACK_POP (tmp2);
1762 tmp1 &= tmp2;
1763 RX_STACK_PUSH (tmp1);
1764 break;
1765
1766 case R_RX_OPor:
1767 RX_STACK_POP (tmp1);
1768 RX_STACK_POP (tmp2);
1769 tmp1 |= tmp2;
1770 RX_STACK_PUSH (tmp1);
1771 break;
1772
1773 case R_RX_OPxor:
1774 RX_STACK_POP (tmp1);
1775 RX_STACK_POP (tmp2);
1776 tmp1 ^= tmp2;
1777 RX_STACK_PUSH (tmp1);
1778 break;
1779
1780 case R_RX_OPnot:
1781 RX_STACK_POP (tmp1);
1782 tmp1 = ~ tmp1;
1783 RX_STACK_PUSH (tmp1);
1784 break;
1785
1786 case R_RX_OPmod:
1787 RX_STACK_POP (tmp1);
1788 RX_STACK_POP (tmp2);
1789 tmp1 %= tmp2;
1790 RX_STACK_PUSH (tmp1);
1791 break;
1792
1793 case R_RX_OPromtop:
1794 RX_STACK_PUSH (get_romstart (&r, info, input_bfd, input_section, rel->r_offset));
1795 break;
1796
1797 case R_RX_OPramtop:
1798 RX_STACK_PUSH (get_ramstart (&r, info, input_bfd, input_section, rel->r_offset));
1799 break;
1800
1801 case R_RX_DIR16UL:
1802 case R_RX_DIR8UL:
1803 case R_RX_ABS16UL:
1804 case R_RX_ABS8UL:
1805 if (rx_stack_top)
1806 RX_STACK_POP (symval);
1807 if (lrel)
1808 *lrel = rel;
1809 *scale = 4;
1810 return symval;
1811
1812 case R_RX_DIR16UW:
1813 case R_RX_DIR8UW:
1814 case R_RX_ABS16UW:
1815 case R_RX_ABS8UW:
1816 if (rx_stack_top)
1817 RX_STACK_POP (symval);
1818 if (lrel)
1819 *lrel = rel;
1820 *scale = 2;
1821 return symval;
1822
1823 default:
1824 if (rx_stack_top)
1825 RX_STACK_POP (symval);
1826 if (lrel)
1827 *lrel = rel;
1828 return symval;
1829 }
1830
1831 rel ++;
1832 }
1833 }
1834
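/* Shift by DELTA the offset of every reloc after IREL, up to and
   including SREL, that still has SREL's original offset.  This keeps
   relocs pointing at an operand that has just moved within its
   shrunken instruction.  */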
1835 static void
1836 move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
1837 {
1838 bfd_vma old_offset = srel->r_offset;
1839
1840 irel ++;
1841 while (irel <= srel)
1842 {
1843 if (irel->r_offset == old_offset)
1844 irel->r_offset += delta;
1845 irel ++;
1846 }
1847 }
1848
1849 /* Relax one section. */
1850
1851 static bfd_boolean
1852 elf32_rx_relax_section (bfd * abfd,
1853 asection * sec,
1854 struct bfd_link_info * link_info,
1855 bfd_boolean * again,
1856 bfd_boolean allow_pcrel3)
1857 {
1858 Elf_Internal_Shdr * symtab_hdr;
1859 Elf_Internal_Shdr * shndx_hdr;
1860 Elf_Internal_Rela * internal_relocs;
1861 Elf_Internal_Rela * free_relocs = NULL;
1862 Elf_Internal_Rela * irel;
1863 Elf_Internal_Rela * srel;
1864 Elf_Internal_Rela * irelend;
1865 Elf_Internal_Rela * next_alignment;
1866 Elf_Internal_Rela * prev_alignment;
1867 bfd_byte * contents = NULL;
1868 bfd_byte * free_contents = NULL;
1869 Elf_Internal_Sym * intsyms = NULL;
1870 Elf_Internal_Sym * free_intsyms = NULL;
1871 Elf_External_Sym_Shndx * shndx_buf = NULL;
1872 bfd_vma pc;
1873 bfd_vma sec_start;
1874 bfd_vma symval = 0;
1875 int pcrel = 0;
1876 int code = 0;
1877 int section_alignment_glue;
1878   /* How much to scale the relocation by - 1, 2, or 4.  */
1879 int scale;
1880
1881 /* Assume nothing changes. */
1882 *again = FALSE;
1883
1884 /* We don't have to do anything for a relocatable link, if
1885 this section does not have relocs, or if this is not a
1886 code section. */
1887 if (link_info->relocatable
1888 || (sec->flags & SEC_RELOC) == 0
1889 || sec->reloc_count == 0
1890 || (sec->flags & SEC_CODE) == 0)
1891 return TRUE;
1892
1893 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
1894 shndx_hdr = &elf_tdata (abfd)->symtab_shndx_hdr;
1895
1896 sec_start = sec->output_section->vma + sec->output_offset;
1897
1898 /* Get the section contents. */
1899 if (elf_section_data (sec)->this_hdr.contents != NULL)
1900 contents = elf_section_data (sec)->this_hdr.contents;
1901 /* Go get them off disk. */
1902 else
1903 {
1904 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
1905 goto error_return;
1906 elf_section_data (sec)->this_hdr.contents = contents;
1907 }
1908
1909 /* Read this BFD's symbols. */
1910 /* Get cached copy if it exists. */
1911 if (symtab_hdr->contents != NULL)
1912 intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
1913 else
1914 {
1915 intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
1916 symtab_hdr->contents = (bfd_byte *) intsyms;
1917 }
1918
1919 if (shndx_hdr->sh_size != 0)
1920 {
1921 bfd_size_type amt;
1922
1923 amt = symtab_hdr->sh_info;
1924 amt *= sizeof (Elf_External_Sym_Shndx);
1925 shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (amt);
1926 if (shndx_buf == NULL)
1927 goto error_return;
1928 if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0
1929 || bfd_bread (shndx_buf, amt, abfd) != amt)
1930 goto error_return;
1931 shndx_hdr->contents = (bfd_byte *) shndx_buf;
1932 }
1933
1934 /* Get a copy of the native relocations. */
1935 internal_relocs = (_bfd_elf_link_read_relocs
1936 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
1937 link_info->keep_memory));
1938 if (internal_relocs == NULL)
1939 goto error_return;
1940 if (! link_info->keep_memory)
1941 free_relocs = internal_relocs;
1942
1943 /* The RL_ relocs must be just before the operand relocs they go
1944 with, so we must sort them to guarantee this. We use bubblesort
1945 instead of qsort so we can guarantee that relocs with the same
1946 address remain in the same relative order. */
1947 reloc_bubblesort (internal_relocs, sec->reloc_count);
1948
1949 /* Walk through them looking for relaxing opportunities. */
1950 irelend = internal_relocs + sec->reloc_count;
1951
1952 /* This will either be NULL or a pointer to the next alignment
1953 relocation. */
1954 next_alignment = internal_relocs;
1955 /* This will be the previous alignment, although at first it points
1956 to the first real relocation. */
1957 prev_alignment = internal_relocs;
1958
1959 /* We calculate worst case shrinkage caused by alignment directives.
1960      Not fool-proof, but better than either ignoring the problem or
1961 doing heavy duty analysis of all the alignment markers in all
1962 input sections. */
1963 section_alignment_glue = 0;
1964 for (irel = internal_relocs; irel < irelend; irel++)
1965 if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
1966 && irel->r_addend & RX_RELAXA_ALIGN)
1967 {
1968 int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);
1969
1970 if (section_alignment_glue < this_glue)
1971 section_alignment_glue = this_glue;
1972 }
1973 /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
1974 shrinkage. */
1975 section_alignment_glue *= 2;
1976
1977 for (irel = internal_relocs; irel < irelend; irel++)
1978 {
1979 unsigned char *insn;
1980 int nrelocs;
1981
1982 /* The insns we care about are all marked with one of these. */
1983 if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
1984 continue;
1985
1986 if (irel->r_addend & RX_RELAXA_ALIGN
1987 || next_alignment == internal_relocs)
1988 {
1989 /* When we delete bytes, we need to maintain all the alignments
1990 indicated. In addition, we need to be careful about relaxing
1991 jumps across alignment boundaries - these displacements
1992 *grow* when we delete bytes. For now, don't shrink
1993 displacements across an alignment boundary, just in case.
1994 Note that this only affects relocations to the same
1995 section. */
1996 prev_alignment = next_alignment;
1997 next_alignment += 2;
1998 while (next_alignment < irelend
1999 && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
2000 || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
2001 next_alignment ++;
2002 if (next_alignment >= irelend || next_alignment->r_offset == 0)
2003 next_alignment = NULL;
2004 }
2005
2006 /* When we hit alignment markers, see if we've shrunk enough
2007 before them to reduce the gap without violating the alignment
2008 requirements. */
2009 if (irel->r_addend & RX_RELAXA_ALIGN)
2010 {
2011 /* At this point, the next relocation *should* be the ELIGN
2012 end marker. */
2013 Elf_Internal_Rela *erel = irel + 1;
2014 unsigned int alignment, nbytes;
2015
2016 if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
2017 continue;
2018 if (!(erel->r_addend & RX_RELAXA_ELIGN))
2019 continue;
2020
2021 alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);
2022
2023 if (erel->r_offset - irel->r_offset < alignment)
2024 continue;
2025
2026 nbytes = erel->r_offset - irel->r_offset;
2027 nbytes /= alignment;
2028 nbytes *= alignment;
2029
2030 elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
2031 erel->r_offset == sec->size);
2032 *again = TRUE;
2033
2034 continue;
2035 }
2036
2037 if (irel->r_addend & RX_RELAXA_ELIGN)
2038 continue;
2039
2040 insn = contents + irel->r_offset;
2041
2042 nrelocs = irel->r_addend & RX_RELAXA_RNUM;
2043
2044 /* At this point, we have an insn that is a candidate for linker
2045 relaxation. There are NRELOCS relocs following that may be
2046 relaxed, although each reloc may be made of more than one
2047 reloc entry (such as gp-rel symbols). */
2048
2049 /* Get the value of the symbol referred to by the reloc. Just
2050 in case this is the last reloc in the list, use the RL's
2051 addend to choose between this reloc (no addend) or the next
2052 (yes addend, which means at least one following reloc). */
2053
2054      /* srel points to the "current" relocation for this insn -
2055 actually the last reloc for a given operand, which is the one
2056 we need to update. We check the relaxations in the same
2057 order that the relocations happen, so we'll just push it
2058 along as we go. */
2059 srel = irel;
2060
2061 pc = sec->output_section->vma + sec->output_offset
2062 + srel->r_offset;
2063
2064 #define GET_RELOC \
2065 symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
2066 pcrel = symval - pc + srel->r_addend; \
2067 nrelocs --;
2068
2069 #define SNIPNR(offset, nbytes) \
2070 elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0);
2071 #define SNIP(offset, nbytes, newtype) \
2072 SNIPNR (offset, nbytes); \
2073 srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
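      /* GET_RELOC resolves the next operand reloc into SYMVAL (with the
	 PC-relative distance in PCREL) and advances SREL, consuming one
	 of the insn's NRELOCS relocs.  SNIPNR deletes NBYTES bytes of
	 the insn at OFFSET; SNIP additionally retypes SREL to the
	 smaller relocation NEWTYPE.  */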
2074
2075 /* The order of these bit tests must match the order that the
2076 relocs appear in. Since we sorted those by offset, we can
2077 predict them. */
2078
2079 /* Note that the numbers in, say, DSP6 are the bit offsets of
2080 the code fields that describe the operand. Bit numbering starts at
2081 0 for the MSB of insn[0]. */
2082
2083 /* DSP* codes:
2084 0 00 [reg]
2085 1 01 dsp:8[reg]
2086 2 10 dsp:16[reg]
2087 3 11 reg */
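/* For example (illustrative): an operand currently in the dsp:16
form (code 10) whose scaled displacement fits in a byte is turned
into dsp:8 (code 01) and one displacement byte is deleted, while a
dsp:8 operand whose value is zero collapses to the bare [reg] form
(code 00).  */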
2088 if (irel->r_addend & RX_RELAXA_DSP6)
2089 {
2090 GET_RELOC;
2091
2092 code = insn[0] & 3;
2093 if (code == 2 && symval/scale <= 255)
2094 {
2095 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2096 insn[0] &= 0xfc;
2097 insn[0] |= 0x01;
2098 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2099 if (newrel != ELF32_R_TYPE (srel->r_info))
2100 {
2101 SNIP (3, 1, newrel);
2102 *again = TRUE;
2103 }
2104 }
2105
2106 else if (code == 1 && symval == 0)
2107 {
2108 insn[0] &= 0xfc;
2109 SNIP (2, 1, R_RX_NONE);
2110 *again = TRUE;
2111 }
2112
2113 /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
2114 else if (code == 1 && symval/scale <= 31
2115 /* Decodable bits. */
2116 && (insn[0] & 0xcc) == 0xcc
2117 /* Width. */
2118 && (insn[0] & 0x30) != 0x30
2119 /* Register MSBs. */
2120 && (insn[1] & 0x88) == 0x00)
2121 {
2122 int newrel = 0;
2123
2124 insn[0] = 0x88 | (insn[0] & 0x30);
2125 /* The register fields are in the right place already. */
2126
2127 /* We can't relax this new opcode. */
2128 irel->r_addend = 0;
2129
2130 switch ((insn[0] & 0x30) >> 4)
2131 {
2132 case 0:
2133 newrel = R_RX_RH_ABS5p5B;
2134 break;
2135 case 1:
2136 newrel = R_RX_RH_ABS5p5W;
2137 break;
2138 case 2:
2139 newrel = R_RX_RH_ABS5p5L;
2140 break;
2141 }
2142
2143 move_reloc (irel, srel, -2);
2144 SNIP (2, 1, newrel);
2145 }
2146
2147 /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
2148 else if (code == 1 && symval/scale <= 31
2149 /* Decodable bits. */
2150 && (insn[0] & 0xf8) == 0x58
2151 /* Register MSBs. */
2152 && (insn[1] & 0x88) == 0x00)
2153 {
2154 int newrel = 0;
2155
2156 insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
2157 /* The register fields are in the right place already. */
2158
2159 /* We can't relax this new opcode. */
2160 irel->r_addend = 0;
2161
2162 switch ((insn[0] & 0x08) >> 3)
2163 {
2164 case 0:
2165 newrel = R_RX_RH_ABS5p5B;
2166 break;
2167 case 1:
2168 newrel = R_RX_RH_ABS5p5W;
2169 break;
2170 }
2171
2172 move_reloc (irel, srel, -2);
2173 SNIP (2, 1, newrel);
2174 }
2175 }
2176
2177 /* A DSP4 operand always follows a DSP6 operand, even if there's
2178 no relocation for it. We have to read the code out of the
2179 opcode to calculate the offset of the operand. */
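/* If the DSP6 operand was [reg] or a plain register (codes 00 and
11) there are no displacement bytes in front of the DSP4 operand,
so it starts right after the two base bytes; the dsp:8 and dsp:16
forms push it out by one or two bytes, which is what the switch
below computes.  */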
2180 if (irel->r_addend & RX_RELAXA_DSP4)
2181 {
2182 int code6, offset = 0;
2183
2184 GET_RELOC;
2185
2186 code6 = insn[0] & 0x03;
2187 switch (code6)
2188 {
2189 case 0: offset = 2; break;
2190 case 1: offset = 3; break;
2191 case 2: offset = 4; break;
2192 case 3: offset = 2; break;
2193 }
2194
2195 code = (insn[0] & 0x0c) >> 2;
2196
2197 if (code == 2 && symval / scale <= 255)
2198 {
2199 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2200
2201 insn[0] &= 0xf3;
2202 insn[0] |= 0x04;
2203 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2204 if (newrel != ELF32_R_TYPE (srel->r_info))
2205 {
2206 SNIP (offset+1, 1, newrel);
2207 *again = TRUE;
2208 }
2209 }
2210
2211 else if (code == 1 && symval == 0)
2212 {
2213 insn[0] &= 0xf3;
2214 SNIP (offset, 1, R_RX_NONE);
2215 *again = TRUE;
2216 }
2217 /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
2218 else if (code == 1 && symval/scale <= 31
2219 /* Decodable bits. */
2220 && (insn[0] & 0xc3) == 0xc3
2221 /* Width. */
2222 && (insn[0] & 0x30) != 0x30
2223 /* Register MSBs. */
2224 && (insn[1] & 0x88) == 0x00)
2225 {
2226 int newrel = 0;
2227
2228 insn[0] = 0x80 | (insn[0] & 0x30);
2229 /* The register fields are in the right place already. */
2230
2231 /* We can't relax this new opcode. */
2232 irel->r_addend = 0;
2233
2234 switch ((insn[0] & 0x30) >> 4)
2235 {
2236 case 0:
2237 newrel = R_RX_RH_ABS5p5B;
2238 break;
2239 case 1:
2240 newrel = R_RX_RH_ABS5p5W;
2241 break;
2242 case 2:
2243 newrel = R_RX_RH_ABS5p5L;
2244 break;
2245 }
2246
2247 move_reloc (irel, srel, -2);
2248 SNIP (2, 1, newrel);
2249 }
2250 }
2251
2252 /* These always occur alone, but the offset depends on whether
2253 it's a MEMEX opcode (0x06) or not. */
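/* The byte to delete when the displacement shrinks sits one byte
earlier with the MEMEX prefix (first byte 0x06) than without it,
hence the choice of 3 versus 4 below.  */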
2254 if (irel->r_addend & RX_RELAXA_DSP14)
2255 {
2256 int offset;
2257 GET_RELOC;
2258
2259 if (insn[0] == 0x06)
2260 offset = 3;
2261 else
2262 offset = 4;
2263
2264 code = insn[1] & 3;
2265
2266 if (code == 2 && symval / scale <= 255)
2267 {
2268 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2269
2270 insn[1] &= 0xfc;
2271 insn[1] |= 0x01;
2272 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2273 if (newrel != ELF32_R_TYPE (srel->r_info))
2274 {
2275 SNIP (offset, 1, newrel);
2276 *again = TRUE;
2277 }
2278 }
2279 else if (code == 1 && symval == 0)
2280 {
2281 insn[1] &= 0xfc;
2282 SNIP (offset, 1, R_RX_NONE);
2283 *again = TRUE;
2284 }
2285 }
2286
2287 /* IMM* codes:
2288 0 00 imm:32
2289 1 01 simm:8
2290 2 10 simm:16
2291 3 11 simm:24. */
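/* Illustrative progression: an imm:32 whose signed value fits in 24
bits is narrowed to simm:24, a simm:24 that fits in 16 bits to
simm:16, and a simm:16 that fits in 8 bits to simm:8, each step
deleting one byte and retyping the reloc; a few CMP, MOV, ADD, MUL,
AND and OR shapes are squeezed further into dedicated uimm:8 and
uimm:4 encodings.  */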
2292
2293 /* These always occur alone. */
2294 if (irel->r_addend & RX_RELAXA_IMM6)
2295 {
2296 long ssymval;
2297
2298 GET_RELOC;
2299
2300 /* These relocations sign-extend, so we must do signed compares. */
2301 ssymval = (long) symval;
2302
2303 code = insn[0] & 0x03;
2304
2305 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2306 {
2307 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2308
2309 insn[0] &= 0xfc;
2310 insn[0] |= 0x03;
2311 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2312 if (newrel != ELF32_R_TYPE (srel->r_info))
2313 {
2314 SNIP (2, 1, newrel);
2315 *again = TRUE;
2316 }
2317 }
2318
2319 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2320 {
2321 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2322
2323 insn[0] &= 0xfc;
2324 insn[0] |= 0x02;
2325 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2326 if (newrel != ELF32_R_TYPE (srel->r_info))
2327 {
2328 SNIP (2, 1, newrel);
2329 *again = TRUE;
2330 }
2331 }
2332
2333 /* Special case UIMM8 format: CMP #uimm8,Rdst. */
2334 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2335 /* Decodable bits. */
2336 && (insn[0] & 0xfc) == 0x74
2337 /* Decodable bits. */
2338 && ((insn[1] & 0xf0) == 0x00))
2339 {
2340 int newrel;
2341
2342 insn[0] = 0x75;
2343 insn[1] = 0x50 | (insn[1] & 0x0f);
2344
2345 /* We can't relax this new opcode. */
2346 irel->r_addend = 0;
2347
2348 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2349 newrel = R_RX_ABS8U;
2350 else
2351 newrel = R_RX_DIR8U;
2352
2353 SNIP (2, 1, newrel);
2354 *again = TRUE;
2355 }
2356
2357 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2358 {
2359 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2360
2361 insn[0] &= 0xfc;
2362 insn[0] |= 0x01;
2363 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2364 if (newrel != ELF32_R_TYPE (srel->r_info))
2365 {
2366 SNIP (2, 1, newrel);
2367 *again = TRUE;
2368 }
2369 }
2370
2371 /* Special case UIMM4 format: CMP, MUL, AND, OR. */
2372 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2373 /* Decodable bits and immediate type. */
2374 && insn[0] == 0x75
2375 /* Decodable bits. */
2376 && (insn[1] & 0xc0) == 0x00)
2377 {
2378 static const int newop[4] = { 1, 3, 4, 5 };
2379
2380 insn[0] = 0x60 | newop[insn[1] >> 4];
2381 /* The register number doesn't move. */
2382
2383 /* We can't relax this new opcode. */
2384 irel->r_addend = 0;
2385
2386 move_reloc (irel, srel, -1);
2387
2388 SNIP (2, 1, R_RX_RH_UIMM4p8);
2389 *again = TRUE;
2390 }
2391
2392 /* Special case UIMM4 format: ADD -> ADD/SUB. */
2393 else if (code == 1 && ssymval <= 15 && ssymval >= -15
2394 /* Decodable bits and immediate type. */
2395 && insn[0] == 0x71
2396 /* Same register for source and destination. */
2397 && ((insn[1] >> 4) == (insn[1] & 0x0f)))
2398 {
2399 int newrel;
2400
2401 /* Note that we can't turn "add $0,Rs" into a NOP
2402 because the flags need to be set right. */
2403
2404 if (ssymval < 0)
2405 {
2406 insn[0] = 0x60; /* Subtract. */
2407 newrel = R_RX_RH_UNEG4p8;
2408 }
2409 else
2410 {
2411 insn[0] = 0x62; /* Add. */
2412 newrel = R_RX_RH_UIMM4p8;
2413 }
2414
2415 /* The register number is in the right place. */
2416
2417 /* We can't relax this new opcode. */
2418 irel->r_addend = 0;
2419
2420 move_reloc (irel, srel, -1);
2421
2422 SNIP (2, 1, newrel);
2423 *again = TRUE;
2424 }
2425 }
2426
2427 /* These are either matched with a DSP6 (2-byte base) or an id24
2428 (3-byte base). */
2429 if (irel->r_addend & RX_RELAXA_IMM12)
2430 {
2431 int dspcode, offset = 0;
2432 long ssymval;
2433
2434 GET_RELOC;
2435
2436 if ((insn[0] & 0xfc) == 0xfc)
2437 dspcode = 1; /* Just something with one byte operand. */
2438 else
2439 dspcode = insn[0] & 3;
2440 switch (dspcode)
2441 {
2442 case 0: offset = 2; break;
2443 case 1: offset = 3; break;
2444 case 2: offset = 4; break;
2445 case 3: offset = 2; break;
2446 }
2447
2448 /* These relocations sign-extend, so we must do signed compares. */
2449 ssymval = (long) symval;
2450
2451 code = (insn[1] >> 2) & 3;
2452 if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
2453 {
2454 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2455
2456 insn[1] &= 0xf3;
2457 insn[1] |= 0x0c;
2458 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2459 if (newrel != ELF32_R_TYPE (srel->r_info))
2460 {
2461 SNIP (offset, 1, newrel);
2462 *again = TRUE;
2463 }
2464 }
2465
2466 else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
2467 {
2468 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2469
2470 insn[1] &= 0xf3;
2471 insn[1] |= 0x08;
2472 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2473 if (newrel != ELF32_R_TYPE (srel->r_info))
2474 {
2475 SNIP (offset, 1, newrel);
2476 *again = TRUE;
2477 }
2478 }
2479
2480 /* Special case UIMM8 format: MOV #uimm8,Rdst. */
2481 else if (code == 2 && ssymval <= 255 && ssymval >= 16
2482 /* Decodable bits. */
2483 && insn[0] == 0xfb
2484 /* Decodable bits. */
2485 && ((insn[1] & 0x03) == 0x02))
2486 {
2487 int newrel;
2488
2489 insn[0] = 0x75;
2490 insn[1] = 0x40 | (insn[1] >> 4);
2491
2492 /* We can't relax this new opcode. */
2493 irel->r_addend = 0;
2494
2495 if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
2496 newrel = R_RX_ABS8U;
2497 else
2498 newrel = R_RX_DIR8U;
2499
2500 SNIP (2, 1, newrel);
2501 *again = TRUE;
2502 }
2503
2504 else if (code == 2 && ssymval <= 127 && ssymval >= -128)
2505 {
2506 unsigned int newrel = ELF32_R_TYPE(srel->r_info);
2507
2508 insn[1] &= 0xf3;
2509 insn[1] |= 0x04;
2510 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2511 if (newrel != ELF32_R_TYPE(srel->r_info))
2512 {
2513 SNIP (offset, 1, newrel);
2514 *again = TRUE;
2515 }
2516 }
2517
2518 /* Special case UIMM4 format: MOV #uimm4,Rdst. */
2519 else if (code == 1 && ssymval <= 15 && ssymval >= 0
2520 /* Decodable bits. */
2521 && insn[0] == 0xfb
2522 /* Decodable bits. */
2523 && ((insn[1] & 0x03) == 0x02))
2524 {
2525 insn[0] = 0x66;
2526 insn[1] = insn[1] >> 4;
2527
2528 /* We can't relax this new opcode. */
2529 irel->r_addend = 0;
2530
2531 move_reloc (irel, srel, -1);
2532
2533 SNIP (2, 1, R_RX_RH_UIMM4p8);
2534 *again = TRUE;
2535 }
2536 }
2537
2538 if (irel->r_addend & RX_RELAXA_BRA)
2539 {
2540 unsigned int newrel = ELF32_R_TYPE (srel->r_info);
2541 int max_pcrel3 = 4;
2542 int alignment_glue = 0;
2543
2544 GET_RELOC;
2545
2546 /* Branches over alignment chunks are problematic, as
2547 deleting bytes here makes the branch *further* away. We
2548 can be aggressive with branches within this alignment
2549 block, but not branches outside it. */
2550 if ((prev_alignment == NULL
2551 || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
2552 && (next_alignment == NULL
2553 || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
2554 alignment_glue = section_alignment_glue;
2555
2556 if (ELF32_R_TYPE(srel[1].r_info) == R_RX_RH_RELAX
2557 && srel[1].r_addend & RX_RELAXA_BRA
2558 && srel[1].r_offset < irel->r_offset + pcrel)
2559 max_pcrel3 ++;
2560
2561 newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
2562
2563 /* The values we compare PCREL with are not what you'd
2564 expect; they're off by a little to compensate for (1)
2565 where the reloc is relative to the insn, and (2) how much
2566 the insn is going to change when we relax it. */
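/* For instance, BRA pcdsp:24 below is only narrowed while PCREL is
within -32768..32765 (further tightened by any alignment glue)
rather than the full signed 16-bit range, leaving room for the
reloc's position inside the insn and for the byte the relaxation
itself removes.  */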
2567
2568 /* These we have to decode. */
2569 switch (insn[0])
2570 {
2571 case 0x04: /* BRA pcdsp:24 */
2572 if (-32768 + alignment_glue <= pcrel
2573 && pcrel <= 32765 - alignment_glue)
2574 {
2575 insn[0] = 0x38;
2576 SNIP (3, 1, newrel);
2577 *again = TRUE;
2578 }
2579 break;
2580
2581 case 0x38: /* BRA pcdsp:16 */
2582 if (-128 + alignment_glue <= pcrel
2583 && pcrel <= 127 - alignment_glue)
2584 {
2585 insn[0] = 0x2e;
2586 SNIP (2, 1, newrel);
2587 *again = TRUE;
2588 }
2589 break;
2590
2591 case 0x2e: /* BRA pcdsp:8 */
2592 /* Note that there's a risk here of shortening things so
2593 much that we no longer fit this reloc; it *should*
2594 only happen when you branch across a branch, and that
2595 branch also devolves into BRA.S. "Real" code should
2596 be OK. */
2597 if (max_pcrel3 + alignment_glue <= pcrel
2598 && pcrel <= 10 - alignment_glue
2599 && allow_pcrel3)
2600 {
2601 insn[0] = 0x08;
2602 SNIP (1, 1, newrel);
2603 move_reloc (irel, srel, -1);
2604 *again = TRUE;
2605 }
2606 break;
2607
2608 case 0x05: /* BSR pcdsp:24 */
2609 if (-32768 + alignment_glue <= pcrel
2610 && pcrel <= 32765 - alignment_glue)
2611 {
2612 insn[0] = 0x39;
2613 SNIP (1, 1, newrel);
2614 *again = TRUE;
2615 }
2616 break;
2617
2618 case 0x3a: /* BEQ.W pcdsp:16 */
2619 case 0x3b: /* BNE.W pcdsp:16 */
2620 if (-128 + alignment_glue <= pcrel
2621 && pcrel <= 127 - alignment_glue)
2622 {
2623 insn[0] = 0x20 | (insn[0] & 1);
2624 SNIP (1, 1, newrel);
2625 *again = TRUE;
2626 }
2627 break;
2628
2629 case 0x20: /* BEQ.B pcdsp:8 */
2630 case 0x21: /* BNE.B pcdsp:8 */
2631 if (max_pcrel3 + alignment_glue <= pcrel
2632 && pcrel - alignment_glue <= 10
2633 && allow_pcrel3)
2634 {
2635 insn[0] = 0x10 | ((insn[0] & 1) << 3);
2636 SNIP (1, 1, newrel);
2637 move_reloc (irel, srel, -1);
2638 *again = TRUE;
2639 }
2640 break;
2641
2642 case 0x16: /* synthetic BNE dsp24 */
2643 case 0x1e: /* synthetic BEQ dsp24 */
2644 if (-32767 + alignment_glue <= pcrel
2645 && pcrel <= 32766 - alignment_glue
2646 && insn[1] == 0x04)
2647 {
2648 if (insn[0] == 0x16)
2649 insn[0] = 0x3b;
2650 else
2651 insn[0] = 0x3a;
2652 /* We snip out the bytes at the end, otherwise the reloc
2653 would get moved too, and by too much. */
2654 SNIP (3, 2, newrel);
2655 move_reloc (irel, srel, -1);
2656 *again = TRUE;
2657 }
2658 break;
2659 }
2660
2661 /* Special case - synthetic conditional branches, pcrel24.
2662 Note that EQ and NE have been handled above. */
2663 if ((insn[0] & 0xf0) == 0x20
2664 && insn[1] == 0x06
2665 && insn[2] == 0x04
2666 && srel->r_offset != irel->r_offset + 1
2667 && -32767 + alignment_glue <= pcrel
2668 && pcrel <= 32766 - alignment_glue)
2669 {
2670 insn[1] = 0x05;
2671 insn[2] = 0x38;
2672 SNIP (5, 1, newrel);
2673 *again = TRUE;
2674 }
2675
2676 /* Special case - synthetic conditional branches, pcrel16 */
2677 if ((insn[0] & 0xf0) == 0x20
2678 && insn[1] == 0x05
2679 && insn[2] == 0x38
2680 && srel->r_offset != irel->r_offset + 1
2681 && -127 + alignment_glue <= pcrel
2682 && pcrel <= 126 - alignment_glue)
2683 {
2684 int cond = (insn[0] & 0x0f) ^ 0x01;
2685
2686 insn[0] = 0x20 | cond;
2687 /* By moving the reloc first, we avoid having
2688 delete_bytes move it also. */
2689 move_reloc (irel, srel, -2);
2690 SNIP (2, 3, newrel);
2691 *again = TRUE;
2692 }
2693 }
2694
2695 BFD_ASSERT (nrelocs == 0);
2696
2697 /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
2698 use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
2699 because it may have one or two relocations. */
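/* A typical (purely illustrative) candidate is MOV.L #200, 8[R3]:
the displacement and the immediate may each come with its own reloc
or be encoded directly in the opcode bytes, so both values are
recovered below from whichever source is present before deciding
whether the short form applies.  */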
2700 if ((insn[0] & 0xfc) == 0xf8
2701 && (insn[1] & 0x80) == 0x00
2702 && (insn[0] & 0x03) != 0x03)
2703 {
2704 int dcode, icode, reg, ioff, dscale, ilen;
2705 bfd_vma disp_val = 0;
2706 long imm_val = 0;
2707 Elf_Internal_Rela * disp_rel = 0;
2708 Elf_Internal_Rela * imm_rel = 0;
2709
2710 /* Reset this. */
2711 srel = irel;
2712
2713 dcode = insn[0] & 0x03;
2714 icode = (insn[1] >> 2) & 0x03;
2715 reg = (insn[1] >> 4) & 0x0f;
2716
2717 ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;
2718
2719 /* Figure out what the displacement is. */
2720 if (dcode == 1 || dcode == 2)
2721 {
2722 /* There's a displacement. See if there's a reloc for it. */
2723 if (srel[1].r_offset == irel->r_offset + 2)
2724 {
2725 GET_RELOC;
2726 disp_val = symval;
2727 disp_rel = srel;
2728 }
2729 else
2730 {
2731 if (dcode == 1)
2732 disp_val = insn[2];
2733 else
2734 {
2735 #if RX_OPCODE_BIG_ENDIAN
2736 disp_val = insn[2] * 256 + insn[3];
2737 #else
2738 disp_val = insn[2] + insn[3] * 256;
2739 #endif
2740 }
2741 switch (insn[1] & 3)
2742 {
2743 case 1:
2744 disp_val *= 2;
2745 scale = 2;
2746 break;
2747 case 2:
2748 disp_val *= 4;
2749 scale = 4;
2750 break;
2751 }
2752 }
2753 }
2754
2755 dscale = scale;
2756
2757 /* Figure out what the immediate is. */
2758 if (srel[1].r_offset == irel->r_offset + ioff)
2759 {
2760 GET_RELOC;
2761 imm_val = (long) symval;
2762 imm_rel = srel;
2763 }
2764 else
2765 {
2766 unsigned char * ip = insn + ioff;
2767
2768 switch (icode)
2769 {
2770 case 1:
2771 /* For byte writes, we don't sign extend. Makes the math easier later. */
2772 if (scale == 1)
2773 imm_val = ip[0];
2774 else
2775 imm_val = (char) ip[0];
2776 break;
2777 case 2:
2778 #if RX_OPCODE_BIG_ENDIAN
2779 imm_val = ((char) ip[0] << 8) | ip[1];
2780 #else
2781 imm_val = ((char) ip[1] << 8) | ip[0];
2782 #endif
2783 break;
2784 case 3:
2785 #if RX_OPCODE_BIG_ENDIAN
2786 imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
2787 #else
2788 imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
2789 #endif
2790 break;
2791 case 0:
2792 #if RX_OPCODE_BIG_ENDIAN
2793 imm_val = (ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
2794 #else
2795 imm_val = (ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
2796 #endif
2797 break;
2798 }
2799 }
2800
2801 ilen = 2;
2802
2803 switch (dcode)
2804 {
2805 case 1:
2806 ilen += 1;
2807 break;
2808 case 2:
2809 ilen += 2;
2810 break;
2811 }
2812
2813 switch (icode)
2814 {
2815 case 1:
2816 ilen += 1;
2817 break;
2818 case 2:
2819 ilen += 2;
2820 break;
2821 case 3:
2822 ilen += 3;
2823 break;
2824 case 4:
2825 ilen += 4;
2826 break;
2827 }
2828
2829 /* The shortcut happens when the immediate is 0..255,
2830 register r0 to r7, and displacement (scaled) 0..31. */
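/* Sketch of the rewrite, assuming the checks below pass: the first
byte becomes 0x3c..0x3e with the operand size in its low two bits,
the second byte packs the scaled displacement split around the
register number, the third byte holds the 8-bit immediate, and the
remaining bytes of the old insn are snipped away.  */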
2831
2832 if (0 <= imm_val && imm_val <= 255
2833 && 0 <= reg && reg <= 7
2834 && disp_val / dscale <= 31)
2835 {
2836 insn[0] = 0x3c | (insn[1] & 0x03);
2837 insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val/dscale) & 0x0f);
2838 insn[2] = imm_val;
2839
2840 if (disp_rel)
2841 {
2842 int newrel = R_RX_NONE;
2843
2844 switch (dscale)
2845 {
2846 case 1:
2847 newrel = R_RX_RH_ABS5p8B;
2848 break;
2849 case 2:
2850 newrel = R_RX_RH_ABS5p8W;
2851 break;
2852 case 4:
2853 newrel = R_RX_RH_ABS5p8L;
2854 break;
2855 }
2856 disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
2857 move_reloc (irel, disp_rel, -1);
2858 }
2859 if (imm_rel)
2860 {
2861 imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
2862 move_reloc (disp_rel ? disp_rel : irel,
2863 imm_rel,
2864 irel->r_offset - imm_rel->r_offset + 2);
2865 }
2866
2867 SNIPNR (3, ilen - 3);
2868 *again = TRUE;
2869
2870 /* We can't relax this new opcode. */
2871 irel->r_addend = 0;
2872 }
2873 }
2874 }
2875
2876 /* We can't reliably relax branches to DIR3U_PCREL unless we know
2877 whatever they're branching over won't shrink any more. If we're
2878 basically done here, do one more pass just for branches - but
2879 don't request a pass after that one! */
2880 if (!*again && !allow_pcrel3)
2881 {
2882 bfd_boolean ignored;
2883
2884 elf32_rx_relax_section (abfd, sec, link_info, &ignored, TRUE);
2885 }
2886
2887 return TRUE;
2888
2889 error_return:
2890 if (free_relocs != NULL)
2891 free (free_relocs);
2892
2893 if (free_contents != NULL)
2894 free (free_contents);
2895
2896 if (shndx_buf != NULL)
2897 {
2898 shndx_hdr->contents = NULL;
2899 free (shndx_buf);
2900 }
2901
2902 if (free_intsyms != NULL)
2903 free (free_intsyms);
2904
2905 return FALSE;
2906 }
2907
2908 static bfd_boolean
2909 elf32_rx_relax_section_wrapper (bfd * abfd,
2910 asection * sec,
2911 struct bfd_link_info * link_info,
2912 bfd_boolean * again)
2913 {
2914 return elf32_rx_relax_section (abfd, sec, link_info, again, FALSE);
2915 }
2916 \f
2917 /* Function to set the ELF flag bits. */
2918
2919 static bfd_boolean
2920 rx_elf_set_private_flags (bfd * abfd, flagword flags)
2921 {
2922 elf_elfheader (abfd)->e_flags = flags;
2923 elf_flags_init (abfd) = TRUE;
2924 return TRUE;
2925 }
2926
2927 static bfd_boolean no_warn_mismatch = FALSE;
2928 static bfd_boolean ignore_lma = TRUE;
2929
2930 void bfd_elf32_rx_set_target_flags (bfd_boolean, bfd_boolean);
2931
2932 void
2933 bfd_elf32_rx_set_target_flags (bfd_boolean user_no_warn_mismatch,
2934 bfd_boolean user_ignore_lma)
2935 {
2936 no_warn_mismatch = user_no_warn_mismatch;
2937 ignore_lma = user_ignore_lma;
2938 }
2939
2940 /* Converts FLAGS into a descriptive string.
2941 Returns a static pointer. */
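/* A typical result looks like "64-bit doubles, no dsp, pid, GCC ABI".  */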
2942
2943 static const char *
2944 describe_flags (flagword flags)
2945 {
2946 static char buf [128];
2947
2948 buf[0] = 0;
2949
2950 if (flags & E_FLAG_RX_64BIT_DOUBLES)
2951 strcat (buf, "64-bit doubles");
2952 else
2953 strcat (buf, "32-bit doubles");
2954
2955 if (flags & E_FLAG_RX_DSP)
2956 strcat (buf, ", dsp");
2957 else
2958 strcat (buf, ", no dsp");
2959
2960 if (flags & E_FLAG_RX_PID)
2961 strcat (buf, ", pid");
2962 else
2963 strcat (buf, ", no pid");
2964
2965 if (flags & E_FLAG_RX_ABI)
2966 strcat (buf, ", RX ABI");
2967 else
2968 strcat (buf, ", GCC ABI");
2969
2970 return buf;
2971 }
2972
2973 /* Merge backend specific data from an object file to the output
2974 object file when linking. */
2975
2976 static bfd_boolean
2977 rx_elf_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
2978 {
2979 flagword old_flags;
2980 flagword new_flags;
2981 bfd_boolean error = FALSE;
2982
2983 new_flags = elf_elfheader (ibfd)->e_flags;
2984 old_flags = elf_elfheader (obfd)->e_flags;
2985
2986 if (!elf_flags_init (obfd))
2987 {
2988 /* First call, no flags set. */
2989 elf_flags_init (obfd) = TRUE;
2990 elf_elfheader (obfd)->e_flags = new_flags;
2991 }
2992 else if (old_flags != new_flags)
2993 {
2994 flagword known_flags;
2995
2996 known_flags = E_FLAG_RX_ABI | E_FLAG_RX_64BIT_DOUBLES
2997 | E_FLAG_RX_DSP | E_FLAG_RX_PID;
2998
2999 if ((old_flags ^ new_flags) & known_flags)
3000 {
3001 /* Only complain if flag bits we care about do not match.
3002 Other bits may be set, since older binaries did use some
3003 deprecated flags. */
3004 if (no_warn_mismatch)
3005 {
3006 elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
3007 }
3008 else
3009 {
3010 _bfd_error_handler ("There is a conflict merging the ELF header flags from %s",
3011 bfd_get_filename (ibfd));
3012 _bfd_error_handler (" the input file's flags: %s",
3013 describe_flags (new_flags));
3014 _bfd_error_handler (" the output file's flags: %s",
3015 describe_flags (old_flags));
3016 error = TRUE;
3017 }
3018 }
3019 else
3020 elf_elfheader (obfd)->e_flags = new_flags & known_flags;
3021 }
3022
3023 if (error)
3024 bfd_set_error (bfd_error_bad_value);
3025
3026 return !error;
3027 }
3028 \f
3029 static bfd_boolean
3030 rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
3031 {
3032 FILE * file = (FILE *) ptr;
3033 flagword flags;
3034
3035 BFD_ASSERT (abfd != NULL && ptr != NULL);
3036
3037 /* Print normal ELF private data. */
3038 _bfd_elf_print_private_bfd_data (abfd, ptr);
3039
3040 flags = elf_elfheader (abfd)->e_flags;
3041 fprintf (file, _("private flags = 0x%lx:"), (long) flags);
3042
3043 fprintf (file, "%s", describe_flags (flags));
3044 return TRUE;
3045 }
3046
3047 /* Return the MACH for an e_flags value. */
3048
3049 static int
3050 elf32_rx_machine (bfd * abfd ATTRIBUTE_UNUSED)
3051 {
3052 #if 0 /* FIXME: EF_RX_CPU_MASK collides with E_FLAG_RX_...
3053 Need to sort out how these flag bits are used.
3054 For now we assume that the flags are OK. */
3055 if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
3056 #endif
3057 return bfd_mach_rx;
3058
3059 return 0;
3060 }
3061
3062 static bfd_boolean
3063 rx_elf_object_p (bfd * abfd)
3064 {
3065 int i;
3066 unsigned int u;
3067 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
3068 int nphdrs = elf_elfheader (abfd)->e_phnum;
3069 sec_ptr bsec;
3070 static int saw_be = FALSE;
3071
3072 /* We never want to automatically choose the non-swapping big-endian
3073 target. The user can only get that explicitly, such as with -I
3074 and objcopy. */
3075 if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
3076 && abfd->target_defaulted)
3077 return FALSE;
3078
3079 /* BFD->target_defaulted is not set to TRUE when a target is chosen
3080 as a fallback, so we check for "scanning" to know when to stop
3081 using the non-swapping target. */
3082 if (abfd->xvec == &bfd_elf32_rx_be_ns_vec
3083 && saw_be)
3084 return FALSE;
3085 if (abfd->xvec == &bfd_elf32_rx_be_vec)
3086 saw_be = TRUE;
3087
3088 bfd_default_set_arch_mach (abfd, bfd_arch_rx,
3089 elf32_rx_machine (abfd));
3090
3091 /* For each PHDR in the object, we must find some section that
3092 corresponds (based on matching file offsets) and use its VMA
3093 information to reconstruct the p_vaddr field we clobbered when we
3094 wrote it out. */
3095 for (i=0; i<nphdrs; i++)
3096 {
3097 for (u=0; u<elf_tdata(abfd)->num_elf_sections; u++)
3098 {
3099 Elf_Internal_Shdr *sec = elf_tdata(abfd)->elf_sect_ptr[u];
3100
3101 if (phdr[i].p_filesz
3102 && phdr[i].p_offset <= (bfd_vma) sec->sh_offset
3103 && (bfd_vma)sec->sh_offset <= phdr[i].p_offset + (phdr[i].p_filesz - 1))
3104 {
3105 /* Found one! The difference between the two addresses,
3106 plus the difference between the two file offsets, is
3107 enough information to reconstruct the lma. */
3108
3109 /* Example where the section and segment file offsets aren't the same:
3110 PHDR[1] = lma fffc0100 offset 00002010 size 00000100
3111 SEC[6] = vma 00000050 offset 00002050 size 00000040
3112
3113 The correct LMA for the section is fffc0100 + (2050-2010) = fffc0140.
3114 */
3115
3116 phdr[i].p_vaddr = sec->sh_addr + (sec->sh_offset - phdr[i].p_offset);
3117 break;
3118 }
3119 }
3120
3121 /* We must update the bfd sections as well, so we don't stop
3122 with one match. */
3123 bsec = abfd->sections;
3124 while (bsec)
3125 {
3126 if (phdr[i].p_filesz
3127 && phdr[i].p_vaddr <= bsec->vma
3128 && bsec->vma <= phdr[i].p_vaddr + (phdr[i].p_filesz - 1))
3129 {
3130 bsec->lma = phdr[i].p_paddr + (bsec->vma - phdr[i].p_vaddr);
3131 }
3132 bsec = bsec->next;
3133 }
3134 }
3135
3136 return TRUE;
3137 }
3138 \f
3139
3140 #ifdef DEBUG
3141 void
3142 rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
3143 {
3144 size_t locsymcount;
3145 Elf_Internal_Sym * isymbuf;
3146 Elf_Internal_Sym * isymend;
3147 Elf_Internal_Sym * isym;
3148 Elf_Internal_Shdr * symtab_hdr;
3149 bfd_boolean free_internal = FALSE, free_external = FALSE;
3150 char * st_info_str;
3151 char * st_info_stb_str;
3152 char * st_other_str;
3153 char * st_shndx_str;
3154
3155 if (! internal_syms)
3156 {
3157 internal_syms = bfd_malloc (1000);
3158 free_internal = 1;
3159 }
3160 if (! external_syms)
3161 {
3162 external_syms = bfd_malloc (1000);
3163 free_external = 1;
3164 }
3165
3166 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3167 locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;
3168 if (free_internal)
3169 isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
3170 symtab_hdr->sh_info, 0,
3171 internal_syms, external_syms, NULL);
3172 else
3173 isymbuf = internal_syms;
3174 isymend = isymbuf + locsymcount;
3175
3176 for (isym = isymbuf ; isym < isymend ; isym++)
3177 {
3178 switch (ELF_ST_TYPE (isym->st_info))
3179 {
3180 case STT_FUNC: st_info_str = "STT_FUNC"; break;
3181 case STT_SECTION: st_info_str = "STT_SECTION"; break;
3182 case STT_FILE: st_info_str = "STT_FILE"; break;
3183 case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
3184 case STT_TLS: st_info_str = "STT_TLS"; break;
3185 default: st_info_str = "";
3186 }
3187 switch (ELF_ST_BIND (isym->st_info))
3188 {
3189 case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
3190 case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
3191 default: st_info_stb_str = "";
3192 }
3193 switch (ELF_ST_VISIBILITY (isym->st_other))
3194 {
3195 case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
3196 case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
3197 case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
3198 default: st_other_str = "";
3199 }
3200 switch (isym->st_shndx)
3201 {
3202 case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
3203 case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
3204 case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
3205 default: st_shndx_str = "";
3206 }
3207
3208 printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
3209 "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
3210 isym,
3211 (unsigned long) isym->st_value,
3212 (unsigned long) isym->st_size,
3213 isym->st_name,
3214 bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
3215 isym->st_name),
3216 isym->st_info, st_info_str, st_info_stb_str,
3217 isym->st_other, st_other_str,
3218 isym->st_shndx, st_shndx_str);
3219 }
3220 if (free_internal)
3221 free (internal_syms);
3222 if (free_external)
3223 free (external_syms);
3224 }
3225
3226 char *
3227 rx_get_reloc (long reloc)
3228 {
3229 if (0 <= reloc && reloc < R_RX_max)
3230 return rx_elf_howto_table[reloc].name;
3231 return "";
3232 }
3233 #endif /* DEBUG */
3234
3235 \f
3236 /* We must take care to keep the on-disk copy of any fully linked
3237 code sections word-swapped if the target is big endian, to match
3238 the Renesas tools. */
3239
3240 /* The rule is: big-endian objects that are final-link executables
3241 have their code sections stored with 32-bit words swapped relative
3242 to what you'd get by default. */
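/* For example, a word that the rest of BFD sees as the bytes
01 02 03 04 is stored in the file as 04 03 02 01 (and read back the
same way); the get/set hooks below perform that per-32-bit-word swap
so that only the on-disk image differs.  */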
3243
3244 static bfd_boolean
3245 rx_get_section_contents (bfd * abfd,
3246 sec_ptr section,
3247 void * location,
3248 file_ptr offset,
3249 bfd_size_type count)
3250 {
3251 int exec = (abfd->flags & EXEC_P) ? 1 : 0;
3252 int s_code = (section->flags & SEC_CODE) ? 1 : 0;
3253 bfd_boolean rv;
3254
3255 #ifdef DJDEBUG
3256 fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
3257 (long) offset, (long) count, section->name,
3258 bfd_big_endian(abfd) ? "be" : "le",
3259 exec, s_code, (long unsigned) section->filepos,
3260 (long unsigned) offset);
3261 #endif
3262
3263 if (exec && s_code && bfd_big_endian (abfd))
3264 {
3265 char * cloc = (char *) location;
3266 bfd_size_type cnt, end_cnt;
3267
3268 rv = TRUE;
3269
3270 /* Fetch and swap unaligned bytes at the beginning. */
3271 if (offset % 4)
3272 {
3273 char buf[4];
3274
3275 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3276 (offset & -4), 4);
3277 if (!rv)
3278 return FALSE;
3279
3280 bfd_putb32 (bfd_getl32 (buf), buf);
3281
3282 cnt = 4 - (offset % 4);
3283 if (cnt > count)
3284 cnt = count;
3285
3286 memcpy (location, buf + (offset % 4), cnt);
3287
3288 count -= cnt;
3289 offset += cnt;
3290 cloc += cnt; /* Step past the bytes already copied. */
3291 }
3292
3293 end_cnt = count % 4;
3294
3295 /* Fetch and swap the middle bytes. */
3296 if (count >= 4)
3297 {
3298 rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
3299 count - end_cnt);
3300 if (!rv)
3301 return FALSE;
3302
3303 for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
3304 bfd_putb32 (bfd_getl32 (cloc), cloc);
3305 }
3306
3307 /* Fetch and swap the end bytes. */
3308 if (end_cnt > 0)
3309 {
3310 char buf[4];
3311
3312 /* Fetch the end bytes. */
3313 rv = _bfd_generic_get_section_contents (abfd, section, buf,
3314 offset + count - end_cnt, 4);
3315 if (!rv)
3316 return FALSE;
3317
3318 bfd_putb32 (bfd_getl32 (buf), buf);
3319 memcpy (cloc, buf, end_cnt);
3320 }
3321 }
3322 else
3323 rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);
3324
3325 return rv;
3326 }
3327
3328 #ifdef DJDEBUG
3329 static bfd_boolean
3330 rx2_set_section_contents (bfd * abfd,
3331 sec_ptr section,
3332 const void * location,
3333 file_ptr offset,
3334 bfd_size_type count)
3335 {
3336 bfd_size_type i;
3337
3338 fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
3339 section->name, (unsigned) section->vma, location, (int) offset, (int) count);
3340 for (i = 0; i < count; i++)
3341 {
3342 if (i % 16 == 0 && i > 0)
3343 fprintf (stderr, "\n");
3344
3345 if (i % 16 && i % 4 == 0)
3346 fprintf (stderr, " ");
3347
3348 if (i % 16 == 0)
3349 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3350
3351 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3352 }
3353 fprintf (stderr, "\n");
3354
3355 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3356 }
3357 #define _bfd_elf_set_section_contents rx2_set_section_contents
3358 #endif
3359
3360 static bfd_boolean
3361 rx_set_section_contents (bfd * abfd,
3362 sec_ptr section,
3363 const void * location,
3364 file_ptr offset,
3365 bfd_size_type count)
3366 {
3367 bfd_boolean exec = (abfd->flags & EXEC_P) ? TRUE : FALSE;
3368 bfd_boolean s_code = (section->flags & SEC_CODE) ? TRUE : FALSE;
3369 bfd_boolean rv;
3370 char * swapped_data = NULL;
3371 bfd_size_type i;
3372 bfd_vma caddr = section->vma + offset;
3373 file_ptr faddr = 0;
3374 bfd_size_type scount;
3375
3376 #ifdef DJDEBUG
3378
3379 fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
3380 (long) offset, (long) count, section->name,
3381 bfd_big_endian (abfd) ? "be" : "le",
3382 exec, s_code);
3383
3384 for (i = 0; i < count; i++)
3385 {
3386 int a = section->vma + offset + i;
3387
3388 if (a % 16 == 0 && a > 0)
3389 fprintf (stderr, "\n");
3390
3391 if (a % 16 && a % 4 == 0)
3392 fprintf (stderr, " ");
3393
3394 if (a % 16 == 0 || i == 0)
3395 fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
3396
3397 fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
3398 }
3399
3400 fprintf (stderr, "\n");
3401 #endif
3402
3403 if (! exec || ! s_code || ! bfd_big_endian (abfd))
3404 return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
3405
3406 while (count > 0 && caddr > 0 && caddr % 4)
3407 {
3408 switch (caddr % 4)
3409 {
3410 case 0: faddr = offset + 3; break;
3411 case 1: faddr = offset + 1; break;
3412 case 2: faddr = offset - 1; break;
3413 case 3: faddr = offset - 3; break;
3414 }
3415
3416 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3417 if (! rv)
3418 return rv;
3419
3420 location ++;
3421 offset ++;
3422 count --;
3423 caddr ++;
3424 }
3425
3426 scount = (int)(count / 4) * 4;
3427 if (scount > 0)
3428 {
3429 char * cloc = (char *) location;
3430
3431 swapped_data = (char *) bfd_alloc (abfd, count);
3432
3433 for (i = 0; i < count; i += 4)
3434 {
3435 bfd_vma v = bfd_getl32 (cloc + i);
3436 bfd_putb32 (v, swapped_data + i);
3437 }
3438
3439 rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
3440
3441 if (!rv)
3442 return rv;
3443 }
3444
3445 count -= scount;
3446 location += scount;
3447 offset += scount;
3448
3449 if (count > 0)
3450 {
3451 caddr = section->vma + offset;
3452 while (count > 0)
3453 {
3454 switch (caddr % 4)
3455 {
3456 case 0: faddr = offset + 3; break;
3457 case 1: faddr = offset + 1; break;
3458 case 2: faddr = offset - 1; break;
3459 case 3: faddr = offset - 3; break;
3460 }
3461 rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
3462 if (! rv)
3463 return rv;
3464
3465 location ++;
3466 offset ++;
3467 count --;
3468 caddr ++;
3469 }
3470 }
3471
3472 return TRUE;
3473 }
3474
3475 static bfd_boolean
3476 rx_final_link (bfd * abfd, struct bfd_link_info * info)
3477 {
3478 asection * o;
3479
3480 for (o = abfd->sections; o != NULL; o = o->next)
3481 {
3482 #ifdef DJDEBUG
3483 fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
3484 o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
3485 #endif
3486 if (o->flags & SEC_CODE
3487 && bfd_big_endian (abfd)
3488 && o->size % 4)
3489 {
3490 #ifdef DJDEBUG
3491 fprintf (stderr, "adjusting...\n");
3492 #endif
3493 o->size += 4 - (o->size % 4);
3494 }
3495 }
3496
3497 return bfd_elf_final_link (abfd, info);
3498 }
3499
3500 static bfd_boolean
3501 elf32_rx_modify_program_headers (bfd * abfd ATTRIBUTE_UNUSED,
3502 struct bfd_link_info * info ATTRIBUTE_UNUSED)
3503 {
3504 const struct elf_backend_data * bed;
3505 struct elf_obj_tdata * tdata;
3506 Elf_Internal_Phdr * phdr;
3507 unsigned int count;
3508 unsigned int i;
3509
3510 bed = get_elf_backend_data (abfd);
3511 tdata = elf_tdata (abfd);
3512 phdr = tdata->phdr;
3513 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
3514
3515 if (ignore_lma)
3516 for (i = count; i-- != 0;)
3517 if (phdr[i].p_type == PT_LOAD)
3518 {
3519 /* The Renesas tools expect p_paddr to be zero. However,
3520 there is no other way to store the writable data in ROM for
3521 startup initialization. So, we let the linker *think*
3522 we're using paddr and vaddr the "usual" way, but at the
3523 last minute we move the paddr into the vaddr (which is what
3524 the simulator uses) and zero out paddr. Note that this
3525 does not affect the section headers, just the program
3526 headers. We hope. */
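/* Illustrative effect (with ignore_lma left at its default of TRUE):
a PT_LOAD built with p_vaddr 0x1000 and p_paddr 0xfff00000 ends up
with both fields equal to 0xfff00000 in the output, while the
section headers keep their original addresses.  */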
3527 phdr[i].p_vaddr = phdr[i].p_paddr;
3528 #if 0 /* If we zero out p_paddr, then the LMA in the section table
3529 becomes wrong. */
3530 phdr[i].p_paddr = 0;
3531 #endif
3532 }
3533
3534 return TRUE;
3535 }
3536
3537 /* The default literal sections should always be marked as "code" (i.e.,
3538 SHF_EXECINSTR). This is particularly important for big-endian mode
3539 when we do not want their contents byte reversed. */
3540 static const struct bfd_elf_special_section elf32_rx_special_sections[] =
3541 {
3542 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3543 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3544 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
3545 { NULL, 0, 0, 0, 0 }
3546 };
3547 \f
3548 #define ELF_ARCH bfd_arch_rx
3549 #define ELF_MACHINE_CODE EM_RX
3550 #define ELF_MAXPAGESIZE 0x1000
3551
3552 #define TARGET_BIG_SYM bfd_elf32_rx_be_vec
3553 #define TARGET_BIG_NAME "elf32-rx-be"
3554
3555 #define TARGET_LITTLE_SYM bfd_elf32_rx_le_vec
3556 #define TARGET_LITTLE_NAME "elf32-rx-le"
3557
3558 #define elf_info_to_howto_rel NULL
3559 #define elf_info_to_howto rx_info_to_howto_rela
3560 #define elf_backend_object_p rx_elf_object_p
3561 #define elf_backend_relocate_section rx_elf_relocate_section
3562 #define elf_symbol_leading_char ('_')
3563 #define elf_backend_can_gc_sections 1
3564 #define elf_backend_modify_program_headers elf32_rx_modify_program_headers
3565
3566 #define bfd_elf32_bfd_reloc_type_lookup rx_reloc_type_lookup
3567 #define bfd_elf32_bfd_reloc_name_lookup rx_reloc_name_lookup
3568 #define bfd_elf32_bfd_set_private_flags rx_elf_set_private_flags
3569 #define bfd_elf32_bfd_merge_private_bfd_data rx_elf_merge_private_bfd_data
3570 #define bfd_elf32_bfd_print_private_bfd_data rx_elf_print_private_bfd_data
3571 #define bfd_elf32_get_section_contents rx_get_section_contents
3572 #define bfd_elf32_set_section_contents rx_set_section_contents
3573 #define bfd_elf32_bfd_final_link rx_final_link
3574 #define bfd_elf32_bfd_relax_section elf32_rx_relax_section_wrapper
3575 #define elf_backend_special_sections elf32_rx_special_sections
3576
3577 #include "elf32-target.h"
3578
3579 /* We define a second big-endian target that doesn't have the custom
3580 section get/set hooks, for times when we want to preserve the
3581 pre-swapped .text sections (like objcopy). */
3582
3583 #undef TARGET_BIG_SYM
3584 #define TARGET_BIG_SYM bfd_elf32_rx_be_ns_vec
3585 #undef TARGET_BIG_NAME
3586 #define TARGET_BIG_NAME "elf32-rx-be-ns"
3587 #undef TARGET_LITTLE_SYM
3588
3589 #undef bfd_elf32_get_section_contents
3590 #undef bfd_elf32_set_section_contents
3591
3592 #undef elf32_bed
3593 #define elf32_bed elf32_rx_be_ns_bed
3594
3595 #include "elf32-target.h"