1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
4 Written by Steve Chamberlain, <sac@cygnus.com>.
5 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6
7 This file is part of BFD, the Binary File Descriptor library.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
22
23 #include "bfd.h"
24 #include "sysdep.h"
25 #include "obstack.h"
26 #include "libbfd.h"
27 #include "bfdlink.h"
28 #include "coff/sh.h"
29 #include "coff/internal.h"
30 #include "libcoff.h"
31
32 /* Internal functions. */
33 static bfd_reloc_status_type sh_reloc
34 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
35 static long get_symbol_value PARAMS ((asymbol *));
36 static boolean sh_relax_section
37 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
38 static boolean sh_relax_delete_bytes
39 PARAMS ((bfd *, asection *, bfd_vma, int));
40 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
41 static boolean sh_align_loads
42 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
43 static boolean sh_swap_insns
44 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, bfd_vma));
45 static boolean sh_relocate_section
46 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
47 struct internal_reloc *, struct internal_syment *, asection **));
48 static bfd_byte *sh_coff_get_relocated_section_contents
49 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
50 bfd_byte *, boolean, asymbol **));
51
52 /* Default section alignment to 2**2. */
53 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER (2)
54
55 /* Generate long file names. */
56 #define COFF_LONG_FILENAMES
57
58 /* The supported relocations. There are a lot of relocations defined
59 in coff/internal.h which we do not expect to ever see. */
60 static reloc_howto_type sh_coff_howtos[] =
61 {
62 { 0 },
63 { 1 },
64 { 2 },
65 { 3 }, /* R_SH_PCREL8 */
66 { 4 }, /* R_SH_PCREL16 */
67 { 5 }, /* R_SH_HIGH8 */
68 { 6 }, /* R_SH_IMM24 */
69 { 7 }, /* R_SH_LOW16 */
70 { 8 },
71 { 9 }, /* R_SH_PCDISP8BY4 */
72
73 HOWTO (R_SH_PCDISP8BY2, /* type */
74 1, /* rightshift */
75 1, /* size (0 = byte, 1 = short, 2 = long) */
76 8, /* bitsize */
77 true, /* pc_relative */
78 0, /* bitpos */
79 complain_overflow_signed, /* complain_on_overflow */
80 sh_reloc, /* special_function */
81 "r_pcdisp8by2", /* name */
82 true, /* partial_inplace */
83 0xff, /* src_mask */
84 0xff, /* dst_mask */
85 true), /* pcrel_offset */
86
87 { 11 }, /* R_SH_PCDISP8 */
88
89 HOWTO (R_SH_PCDISP, /* type */
90 1, /* rightshift */
91 1, /* size (0 = byte, 1 = short, 2 = long) */
92 12, /* bitsize */
93 true, /* pc_relative */
94 0, /* bitpos */
95 complain_overflow_signed, /* complain_on_overflow */
96 sh_reloc, /* special_function */
97 "r_pcdisp12by2", /* name */
98 true, /* partial_inplace */
99 0xfff, /* src_mask */
100 0xfff, /* dst_mask */
101 true), /* pcrel_offset */
102
103 { 13 },
104
105 HOWTO (R_SH_IMM32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 false, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield, /* complain_on_overflow */
112 sh_reloc, /* special_function */
113 "r_imm32", /* name */
114 true, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 false), /* pcrel_offset */
118
119 { 15 },
120 { 16 }, /* R_SH_IMM8 */
121 { 17 }, /* R_SH_IMM8BY2 */
122 { 18 }, /* R_SH_IMM8BY4 */
123 { 19 }, /* R_SH_IMM4 */
124 { 20 }, /* R_SH_IMM4BY2 */
125 { 21 }, /* R_SH_IMM4BY4 */
126
127 HOWTO (R_SH_PCRELIMM8BY2, /* type */
128 1, /* rightshift */
129 1, /* size (0 = byte, 1 = short, 2 = long) */
130 8, /* bitsize */
131 true, /* pc_relative */
132 0, /* bitpos */
133 complain_overflow_unsigned, /* complain_on_overflow */
134 sh_reloc, /* special_function */
135 "r_pcrelimm8by2", /* name */
136 true, /* partial_inplace */
137 0xff, /* src_mask */
138 0xff, /* dst_mask */
139 true), /* pcrel_offset */
140
141 HOWTO (R_SH_PCRELIMM8BY4, /* type */
142 2, /* rightshift */
143 1, /* size (0 = byte, 1 = short, 2 = long) */
144 8, /* bitsize */
145 true, /* pc_relative */
146 0, /* bitpos */
147 complain_overflow_unsigned, /* complain_on_overflow */
148 sh_reloc, /* special_function */
149 "r_pcrelimm8by4", /* name */
150 true, /* partial_inplace */
151 0xff, /* src_mask */
152 0xff, /* dst_mask */
153 true), /* pcrel_offset */
154
155 HOWTO (R_SH_IMM16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 false, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield, /* complain_on_overflow */
162 sh_reloc, /* special_function */
163 "r_imm16", /* name */
164 true, /* partial_inplace */
165 0xffff, /* src_mask */
166 0xffff, /* dst_mask */
167 false), /* pcrel_offset */
168
169 HOWTO (R_SH_SWITCH16, /* type */
170 0, /* rightshift */
171 1, /* size (0 = byte, 1 = short, 2 = long) */
172 16, /* bitsize */
173 false, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield, /* complain_on_overflow */
176 sh_reloc, /* special_function */
177 "r_switch16", /* name */
178 true, /* partial_inplace */
179 0xffff, /* src_mask */
180 0xffff, /* dst_mask */
181 false), /* pcrel_offset */
182
183 HOWTO (R_SH_SWITCH32, /* type */
184 0, /* rightshift */
185 2, /* size (0 = byte, 1 = short, 2 = long) */
186 32, /* bitsize */
187 false, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield, /* complain_on_overflow */
190 sh_reloc, /* special_function */
191 "r_switch32", /* name */
192 true, /* partial_inplace */
193 0xffffffff, /* src_mask */
194 0xffffffff, /* dst_mask */
195 false), /* pcrel_offset */
196
197 HOWTO (R_SH_USES, /* type */
198 0, /* rightshift */
199 1, /* size (0 = byte, 1 = short, 2 = long) */
200 16, /* bitsize */
201 false, /* pc_relative */
202 0, /* bitpos */
203 complain_overflow_bitfield, /* complain_on_overflow */
204 sh_reloc, /* special_function */
205 "r_uses", /* name */
206 true, /* partial_inplace */
207 0xffff, /* src_mask */
208 0xffff, /* dst_mask */
209 false), /* pcrel_offset */
210
211 HOWTO (R_SH_COUNT, /* type */
212 0, /* rightshift */
213 2, /* size (0 = byte, 1 = short, 2 = long) */
214 32, /* bitsize */
215 false, /* pc_relative */
216 0, /* bitpos */
217 complain_overflow_bitfield, /* complain_on_overflow */
218 sh_reloc, /* special_function */
219 "r_count", /* name */
220 true, /* partial_inplace */
221 0xffffffff, /* src_mask */
222 0xffffffff, /* dst_mask */
223 false), /* pcrel_offset */
224
225 HOWTO (R_SH_ALIGN, /* type */
226 0, /* rightshift */
227 2, /* size (0 = byte, 1 = short, 2 = long) */
228 32, /* bitsize */
229 false, /* pc_relative */
230 0, /* bitpos */
231 complain_overflow_bitfield, /* complain_on_overflow */
232 sh_reloc, /* special_function */
233 "r_align", /* name */
234 true, /* partial_inplace */
235 0xffffffff, /* src_mask */
236 0xffffffff, /* dst_mask */
237 false), /* pcrel_offset */
238
239 HOWTO (R_SH_CODE, /* type */
240 0, /* rightshift */
241 2, /* size (0 = byte, 1 = short, 2 = long) */
242 32, /* bitsize */
243 false, /* pc_relative */
244 0, /* bitpos */
245 complain_overflow_bitfield, /* complain_on_overflow */
246 sh_reloc, /* special_function */
247 "r_code", /* name */
248 true, /* partial_inplace */
249 0xffffffff, /* src_mask */
250 0xffffffff, /* dst_mask */
251 false), /* pcrel_offset */
252
253 HOWTO (R_SH_DATA, /* type */
254 0, /* rightshift */
255 2, /* size (0 = byte, 1 = short, 2 = long) */
256 32, /* bitsize */
257 false, /* pc_relative */
258 0, /* bitpos */
259 complain_overflow_bitfield, /* complain_on_overflow */
260 sh_reloc, /* special_function */
261 "r_data", /* name */
262 true, /* partial_inplace */
263 0xffffffff, /* src_mask */
264 0xffffffff, /* dst_mask */
265 false), /* pcrel_offset */
266
267 HOWTO (R_SH_LABEL, /* type */
268 0, /* rightshift */
269 2, /* size (0 = byte, 1 = short, 2 = long) */
270 32, /* bitsize */
271 false, /* pc_relative */
272 0, /* bitpos */
273 complain_overflow_bitfield, /* complain_on_overflow */
274 sh_reloc, /* special_function */
275 "r_label", /* name */
276 true, /* partial_inplace */
277 0xffffffff, /* src_mask */
278 0xffffffff, /* dst_mask */
279 false) /* pcrel_offset */
280 };
281
282 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
283
284 /* Check for a bad magic number. */
285 #define BADMAG(x) SHBADMAG(x)
286
287 /* Customize coffcode.h (this is not currently used). */
288 #define SH 1
289
290 /* FIXME: This should not be set here. */
291 #define __A_MAGIC_SET__
292
293 /* Swap the r_offset field in and out. */
294 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
295 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
296
297 /* Swap out extra information in the reloc structure. */
298 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
299 do \
300 { \
301 dst->r_stuff[0] = 'S'; \
302 dst->r_stuff[1] = 'C'; \
303 } \
304 while (0)
305
306 /* Get the value of a symbol, when performing a relocation. */
307
308 static long
309 get_symbol_value (symbol)
310 asymbol *symbol;
311 {
312 bfd_vma relocation;
313
314 if (bfd_is_com_section (symbol->section))
315 relocation = 0;
316 else
317 relocation = (symbol->value +
318 symbol->section->output_section->vma +
319 symbol->section->output_offset);
320
321 return relocation;
322 }
323
324 /* This macro is used in coffcode.h to get the howto corresponding to
325 an internal reloc. */
326
327 #define RTYPE2HOWTO(relent, internal) \
328 ((relent)->howto = \
329 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
330 ? &sh_coff_howtos[(internal)->r_type] \
331 : (reloc_howto_type *) NULL))
332
333 /* This is the same as the macro in coffcode.h, except that it copies
334 r_offset into reloc_entry->addend for some relocs. */
335 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
336 { \
337 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
338 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
339 coffsym = (obj_symbols (abfd) \
340 + (cache_ptr->sym_ptr_ptr - symbols)); \
341 else if (ptr) \
342 coffsym = coff_symbol_from (abfd, ptr); \
343 if (coffsym != (coff_symbol_type *) NULL \
344 && coffsym->native->u.syment.n_scnum == 0) \
345 cache_ptr->addend = 0; \
346 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
347 && ptr->section != (asection *) NULL) \
348 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
349 else \
350 cache_ptr->addend = 0; \
351 if ((reloc).r_type == R_SH_SWITCH16 \
352 || (reloc).r_type == R_SH_SWITCH32 \
353 || (reloc).r_type == R_SH_USES \
354 || (reloc).r_type == R_SH_COUNT \
355 || (reloc).r_type == R_SH_ALIGN) \
356 cache_ptr->addend = (reloc).r_offset; \
357 }
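
/* For those reloc types the r_offset field holds relaxation
   information (an offset, a count, or an alignment power--see the
   comments in sh_relax_section and sh_relax_delete_bytes below) rather
   than an ordinary addend, which is why CALC_ADDEND above copies it
   into the addend.  */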
358
359 /* This is the howto function for the SH relocations. */
360
361 static bfd_reloc_status_type
362 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
363 error_message)
364 bfd *abfd;
365 arelent *reloc_entry;
366 asymbol *symbol_in;
367 PTR data;
368 asection *input_section;
369 bfd *output_bfd;
370 char **error_message;
371 {
372 unsigned long insn;
373 bfd_vma sym_value;
374 unsigned short r_type;
375 bfd_vma addr = reloc_entry->address;
376 bfd_byte *hit_data = addr + (bfd_byte *) data;
377
378 r_type = reloc_entry->howto->type;
379
380 if (output_bfd != NULL)
381 {
382 /* Partial linking--do nothing. */
383 reloc_entry->address += input_section->output_offset;
384 return bfd_reloc_ok;
385 }
386
387 /* Almost all relocs have to do with relaxing. If any work must be
388 done for them, it has been done in sh_relax_section. */
389 if (r_type != R_SH_IMM32
390 && (r_type != R_SH_PCDISP
391 || (symbol_in->flags & BSF_LOCAL) != 0))
392 return bfd_reloc_ok;
393
394 if (symbol_in != NULL
395 && bfd_is_und_section (symbol_in->section))
396 return bfd_reloc_undefined;
397
398 sym_value = get_symbol_value (symbol_in);
399
400 switch (r_type)
401 {
402 case R_SH_IMM32:
403 insn = bfd_get_32 (abfd, hit_data);
404 insn += sym_value + reloc_entry->addend;
405 bfd_put_32 (abfd, insn, hit_data);
406 break;
407 case R_SH_PCDISP:
408 insn = bfd_get_16 (abfd, hit_data);
409 sym_value += reloc_entry->addend;
410 sym_value -= (input_section->output_section->vma
411 + input_section->output_offset
412 + addr
413 + 4);
414 sym_value += (insn & 0xfff) << 1;
415 if (insn & 0x800)
416 sym_value -= 0x1000;
417 insn = (insn & 0xf000) | (sym_value & 0xfff);
418 bfd_put_16 (abfd, insn, hit_data);
419 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
420 return bfd_reloc_overflow;
421 break;
422 default:
423 abort ();
424 break;
425 }
426
427 return bfd_reloc_ok;
428 }
429
430 /* We can do relaxing. */
431 #define coff_bfd_relax_section sh_relax_section
432
433 /* We use the special COFF backend linker. */
434 #define coff_relocate_section sh_relocate_section
435
436 /* When relaxing, we need to use special code to get the relocated
437 section contents. */
438 #define coff_bfd_get_relocated_section_contents \
439 sh_coff_get_relocated_section_contents
440
441 #include "coffcode.h"
442 \f
443 /* This function handles relaxing on the SH.
444
445 Function calls on the SH look like this:
446
447 movl L1,r0
448 ...
449 jsr @r0
450 ...
451 L1:
452 .long function
453
454 The compiler and assembler will cooperate to create R_SH_USES
455 relocs on the jsr instructions. The r_offset field of the
456 R_SH_USES reloc is the PC relative offset to the instruction which
457 loads the register (the r_offset field is computed as though it
458 were a jump instruction, so the offset value is actually from four
459 bytes past the instruction). The linker can use this reloc to
460 determine just which function is being called, and thus decide
461 whether it is possible to replace the jsr with a bsr.
462
463 If multiple function calls are all based on a single register load
464 (i.e., the same function is called multiple times), the compiler
465 guarantees that each function call will have an R_SH_USES reloc.
466 Therefore, if the linker is able to convert each R_SH_USES reloc
467 which refers to that address, it can safely eliminate the register
468 load.
469
470 When the assembler creates an R_SH_USES reloc, it examines it to
471 determine which address is being loaded (L1 in the above example).
472 It then counts the number of references to that address, and
473 creates an R_SH_COUNT reloc at that address. The r_offset field of
474 the R_SH_COUNT reloc will be the number of references. If the
475 linker is able to eliminate a register load, it can use the
476 R_SH_COUNT reloc to see whether it can also eliminate the function
477 address.
478
479 SH relaxing also handles another, unrelated, matter. On the SH, if
480 a load or store instruction is not aligned on a four byte boundary,
481 the memory cycle interferes with the 32 bit instruction fetch,
482 causing a one cycle bubble in the pipeline. Therefore, we try to
483 align load and store instructions on four byte boundaries if we
484 can, by swapping them with one of the adjacent instructions. */
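
/* As an illustrative sketch (not part of the original comments): when
   every R_SH_USES reloc which refers to L1 can be converted, the
   sequence above relaxes to

   bsr function
   ...

   The mov.l instruction and the constant at L1 are deleted, each
   R_SH_USES reloc becomes an R_SH_PCDISP reloc on the new bsr
   (referring directly to the called function), and the count held in
   the R_SH_COUNT reloc at L1 drops to zero.  */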
485
486 static boolean
487 sh_relax_section (abfd, sec, link_info, again)
488 bfd *abfd;
489 asection *sec;
490 struct bfd_link_info *link_info;
491 boolean *again;
492 {
493 struct internal_reloc *internal_relocs;
494 struct internal_reloc *free_relocs = NULL;
495 boolean have_code;
496 struct internal_reloc *irel, *irelend;
497 bfd_byte *contents = NULL;
498 bfd_byte *free_contents = NULL;
499
500 *again = false;
501
502 if (link_info->relocateable
503 || (sec->flags & SEC_RELOC) == 0
504 || sec->reloc_count == 0)
505 return true;
506
507 /* If this is the first time we have been called for this section,
508 initialize the cooked size. */
509 if (sec->_cooked_size == 0)
510 sec->_cooked_size = sec->_raw_size;
511
512 internal_relocs = (_bfd_coff_read_internal_relocs
513 (abfd, sec, link_info->keep_memory,
514 (bfd_byte *) NULL, false,
515 (struct internal_reloc *) NULL));
516 if (internal_relocs == NULL)
517 goto error_return;
518 if (! link_info->keep_memory)
519 free_relocs = internal_relocs;
520
521 have_code = false;
522
523 irelend = internal_relocs + sec->reloc_count;
524 for (irel = internal_relocs; irel < irelend; irel++)
525 {
526 bfd_vma laddr, paddr, symval;
527 unsigned short insn;
528 struct internal_reloc *irelfn, *irelscan, *irelcount;
529 struct internal_syment sym;
530 bfd_signed_vma foff;
531
532 if (irel->r_type == R_SH_CODE)
533 have_code = true;
534
535 if (irel->r_type != R_SH_USES)
536 continue;
537
538 /* Get the section contents. */
539 if (contents == NULL)
540 {
541 if (coff_section_data (abfd, sec) != NULL
542 && coff_section_data (abfd, sec)->contents != NULL)
543 contents = coff_section_data (abfd, sec)->contents;
544 else
545 {
546 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
547 if (contents == NULL)
548 goto error_return;
549 free_contents = contents;
550
551 if (! bfd_get_section_contents (abfd, sec, contents,
552 (file_ptr) 0, sec->_raw_size))
553 goto error_return;
554 }
555 }
556
557 /* The r_offset field of the R_SH_USES reloc will point us to
558 the register load. The 4 is because the r_offset field is
559 computed as though it were a jump offset, which is based
560 from 4 bytes after the jump instruction. */
561 laddr = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
562 if (laddr >= sec->_raw_size)
563 {
564 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
565 bfd_get_filename (abfd),
566 (unsigned long) irel->r_vaddr);
567 continue;
568 }
569 insn = bfd_get_16 (abfd, contents + laddr);
570
571 /* If the instruction is not mov.l NN,rN, we don't know what to
572 do. */
573 if ((insn & 0xf000) != 0xd000)
574 {
575 ((*_bfd_error_handler)
576 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
577 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
578 continue;
579 }
580
581 /* Get the address from which the register is being loaded. The
582 displacement in the mov.l instruction is quadrupled. It is a
583 displacement from four bytes after the mov.l instruction, but,
584 before adding in the PC address, the two least significant bits
585 of the PC are cleared. We assume that the section is aligned
586 on a four byte boundary. */
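
/* Illustrative example (not from the original comments): a mov.l at
   section offset 0x102 with a displacement field of 3 loads from
   ((0x102 + 4) & ~3) + 3 * 4 == 0x110.  */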
587 paddr = insn & 0xff;
588 paddr *= 4;
589 paddr += (laddr + 4) &~ 3;
590 if (paddr >= sec->_raw_size)
591 {
592 ((*_bfd_error_handler)
593 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
594 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
595 continue;
596 }
597
598 /* Get the reloc for the address from which the register is
599 being loaded. This reloc will tell us which function is
600 actually being called. */
601 paddr += sec->vma;
602 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
603 if (irelfn->r_vaddr == paddr
604 && irelfn->r_type == R_SH_IMM32)
605 break;
606 if (irelfn >= irelend)
607 {
608 ((*_bfd_error_handler)
609 ("%s: 0x%lx: warning: could not find expected reloc",
610 bfd_get_filename (abfd), (unsigned long) paddr));
611 continue;
612 }
613
614 /* Get the value of the symbol referred to by the reloc. */
615 if (! _bfd_coff_get_external_symbols (abfd))
616 goto error_return;
617 bfd_coff_swap_sym_in (abfd,
618 ((bfd_byte *) obj_coff_external_syms (abfd)
619 + (irelfn->r_symndx
620 * bfd_coff_symesz (abfd))),
621 &sym);
622 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
623 {
624 ((*_bfd_error_handler)
625 ("%s: 0x%lx: warning: symbol in unexpected section",
626 bfd_get_filename (abfd), (unsigned long) paddr));
627 continue;
628 }
629
630 if (sym.n_sclass != C_EXT)
631 {
632 symval = (sym.n_value
633 - sec->vma
634 + sec->output_section->vma
635 + sec->output_offset);
636 }
637 else
638 {
639 struct coff_link_hash_entry *h;
640
641 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
642 BFD_ASSERT (h != NULL);
643 if (h->root.type != bfd_link_hash_defined
644 && h->root.type != bfd_link_hash_defweak)
645 {
646 /* This appears to be a reference to an undefined
647 symbol. Just ignore it--it will be caught by the
648 regular reloc processing. */
649 continue;
650 }
651
652 symval = (h->root.u.def.value
653 + h->root.u.def.section->output_section->vma
654 + h->root.u.def.section->output_offset);
655 }
656
657 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
658
659 /* See if this function call can be shortened. */
660 foff = (symval
661 - (irel->r_vaddr
662 - sec->vma
663 + sec->output_section->vma
664 + sec->output_offset
665 + 4));
666 if (foff < -0x1000 || foff >= 0x1000)
667 {
668 /* After all that work, we can't shorten this function call. */
669 continue;
670 }
671
672 /* Shorten the function call. */
673
674 /* For simplicity of coding, we are going to modify the section
675 contents, the section relocs, and the BFD symbol table. We
676 must tell the rest of the code not to free up this
677 information. It would be possible to instead create a table
678 of changes which have to be made, as is done in coff-mips.c;
679 that would be more work, but would require less memory when
680 the linker is run. */
681
682 if (coff_section_data (abfd, sec) == NULL)
683 {
684 sec->used_by_bfd =
685 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
686 if (sec->used_by_bfd == NULL)
687 goto error_return;
688 }
689
690 coff_section_data (abfd, sec)->relocs = internal_relocs;
691 coff_section_data (abfd, sec)->keep_relocs = true;
692 free_relocs = NULL;
693
694 coff_section_data (abfd, sec)->contents = contents;
695 coff_section_data (abfd, sec)->keep_contents = true;
696 free_contents = NULL;
697
698 obj_coff_keep_syms (abfd) = true;
699
700 /* Replace the jsr with a bsr. */
701
702 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
703 replace the jsr with a bsr. */
704 irel->r_type = R_SH_PCDISP;
705 irel->r_symndx = irelfn->r_symndx;
706 if (sym.n_sclass != C_EXT)
707 {
708 /* If this needs to be changed because of future relaxing,
709 it will be handled here like other internal PCDISP
710 relocs. */
711 bfd_put_16 (abfd,
712 0xb000 | ((foff >> 1) & 0xfff),
713 contents + irel->r_vaddr - sec->vma);
714 }
715 else
716 {
717 /* We can't fully resolve this yet, because the external
718 symbol value may be changed by future relaxing. We let
719 the final link phase handle it. */
720 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
721 }
722
723 /* See if there is another R_SH_USES reloc referring to the same
724 register load. */
725 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
726 if (irelscan->r_type == R_SH_USES
727 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
728 break;
729 if (irelscan < irelend)
730 {
731 /* Some other function call depends upon this register load,
732 and we have not yet converted that function call.
733 Indeed, we may never be able to convert it. There is
734 nothing else we can do at this point. */
735 continue;
736 }
737
738 /* Look for a R_SH_COUNT reloc on the location where the
739 function address is stored. Do this before deleting any
740 bytes, to avoid confusion about the address. */
741 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
742 if (irelcount->r_vaddr == paddr
743 && irelcount->r_type == R_SH_COUNT)
744 break;
745
746 /* Delete the register load. */
747 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
748 goto error_return;
749
750 /* That will change things, so, just in case it permits some
751 other function call to come within range, we should relax
752 again. Note that this is not required, and it may be slow. */
753 *again = true;
754
755 /* Now check whether we got a COUNT reloc. */
756 if (irelcount >= irelend)
757 {
758 ((*_bfd_error_handler)
759 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
760 bfd_get_filename (abfd), (unsigned long) paddr));
761 continue;
762 }
763
764 /* The number of uses is stored in the r_offset field. We've
765 just deleted one. */
766 if (irelcount->r_offset == 0)
767 {
768 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
769 bfd_get_filename (abfd),
770 (unsigned long) paddr));
771 continue;
772 }
773
774 --irelcount->r_offset;
775
776 /* If there are no more uses, we can delete the address. Reload
777 the address from irelfn, in case it was changed by the
778 previous call to sh_relax_delete_bytes. */
779 if (irelcount->r_offset == 0)
780 {
781 if (! sh_relax_delete_bytes (abfd, sec,
782 irelfn->r_vaddr - sec->vma, 4))
783 goto error_return;
784 }
785
786 /* We've done all we can with that function call. */
787 }
788
789 /* Look for load and store instructions that we can align on four
790 byte boundaries. */
791 if (have_code)
792 {
793 boolean swapped;
794
795 /* Get the section contents. */
796 if (contents == NULL)
797 {
798 if (coff_section_data (abfd, sec) != NULL
799 && coff_section_data (abfd, sec)->contents != NULL)
800 contents = coff_section_data (abfd, sec)->contents;
801 else
802 {
803 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
804 if (contents == NULL)
805 goto error_return;
806 free_contents = contents;
807
808 if (! bfd_get_section_contents (abfd, sec, contents,
809 (file_ptr) 0, sec->_raw_size))
810 goto error_return;
811 }
812 }
813
814 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
815 goto error_return;
816
817 if (swapped)
818 {
819 if (coff_section_data (abfd, sec) == NULL)
820 {
821 sec->used_by_bfd =
822 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
823 if (sec->used_by_bfd == NULL)
824 goto error_return;
825 }
826
827 coff_section_data (abfd, sec)->relocs = internal_relocs;
828 coff_section_data (abfd, sec)->keep_relocs = true;
829 free_relocs = NULL;
830
831 coff_section_data (abfd, sec)->contents = contents;
832 coff_section_data (abfd, sec)->keep_contents = true;
833 free_contents = NULL;
834
835 obj_coff_keep_syms (abfd) = true;
836 }
837 }
838
839 if (free_relocs != NULL)
840 {
841 free (free_relocs);
842 free_relocs = NULL;
843 }
844
845 if (free_contents != NULL)
846 {
847 if (! link_info->keep_memory)
848 free (free_contents);
849 else
850 {
851 /* Cache the section contents for coff_link_input_bfd. */
852 if (coff_section_data (abfd, sec) == NULL)
853 {
854 sec->used_by_bfd =
855 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
856 if (sec->used_by_bfd == NULL)
857 goto error_return;
858 coff_section_data (abfd, sec)->relocs = NULL;
859 }
860 coff_section_data (abfd, sec)->contents = contents;
861 }
862 }
863
864 return true;
865
866 error_return:
867 if (free_relocs != NULL)
868 free (free_relocs);
869 if (free_contents != NULL)
870 free (free_contents);
871 return false;
872 }
873
874 /* Delete some bytes from a section while relaxing. */
875
876 static boolean
877 sh_relax_delete_bytes (abfd, sec, addr, count)
878 bfd *abfd;
879 asection *sec;
880 bfd_vma addr;
881 int count;
882 {
883 bfd_byte *contents;
884 struct internal_reloc *irel, *irelend;
885 struct internal_reloc *irelalign;
886 bfd_vma toaddr;
887 bfd_byte *esym, *esymend;
888 bfd_size_type symesz;
889 struct coff_link_hash_entry **sym_hash;
890 asection *o;
891
892 contents = coff_section_data (abfd, sec)->contents;
893
894 /* The deletion must stop at the next ALIGN reloc for an alignment
895 power larger than the number of bytes we are deleting. */
896
897 irelalign = NULL;
898 toaddr = sec->_cooked_size;
899
900 irel = coff_section_data (abfd, sec)->relocs;
901 irelend = irel + sec->reloc_count;
902 for (; irel < irelend; irel++)
903 {
904 if (irel->r_type == R_SH_ALIGN
905 && irel->r_vaddr - sec->vma > addr
906 && count < (1 << irel->r_offset))
907 {
908 irelalign = irel;
909 toaddr = irel->r_vaddr - sec->vma;
910 break;
911 }
912 }
913
914 /* Actually delete the bytes. */
915 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
916 if (irelalign == NULL)
917 sec->_cooked_size -= count;
918 else
919 {
920 int i;
921
922 #define NOP_OPCODE (0x0009)
923
924 BFD_ASSERT ((count & 1) == 0);
925 for (i = 0; i < count; i += 2)
926 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
927 }
928
929 /* Adjust all the relocs. */
930 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
931 {
932 bfd_vma nraddr, start, stop;
933 int insn = 0;
934 struct internal_syment sym;
935 int off, adjust, oinsn;
936 bfd_signed_vma voff;
937 boolean overflow;
938
939 /* Get the new reloc address. */
940 nraddr = irel->r_vaddr - sec->vma;
941 if ((irel->r_vaddr - sec->vma > addr
942 && irel->r_vaddr - sec->vma < toaddr)
943 || (irel->r_type == R_SH_ALIGN
944 && irel->r_vaddr - sec->vma == toaddr))
945 nraddr -= count;
946
947 /* See if this reloc was for the bytes we have deleted, in which
948 case we no longer care about it. Don't delete relocs which
949 represent addresses, though. */
950 if (irel->r_vaddr - sec->vma >= addr
951 && irel->r_vaddr - sec->vma < addr + count
952 && irel->r_type != R_SH_ALIGN
953 && irel->r_type != R_SH_CODE
954 && irel->r_type != R_SH_DATA)
955 irel->r_type = R_SH_UNUSED;
956
957 /* If this is a PC relative reloc, see if the range it covers
958 includes the bytes we have deleted. */
959 switch (irel->r_type)
960 {
961 default:
962 break;
963
964 case R_SH_PCDISP8BY2:
965 case R_SH_PCDISP:
966 case R_SH_PCRELIMM8BY2:
967 case R_SH_PCRELIMM8BY4:
968 start = irel->r_vaddr - sec->vma;
969 insn = bfd_get_16 (abfd, contents + nraddr);
970 break;
971 }
972
973 switch (irel->r_type)
974 {
975 default:
976 start = stop = addr;
977 break;
978
979 case R_SH_IMM32:
980 /* If this reloc is against a symbol defined in this
981 section, and the symbol will not be adjusted below, we
982 must check the addend to see if it will put the value in
983 range to be adjusted, and hence must be changed. */
984 bfd_coff_swap_sym_in (abfd,
985 ((bfd_byte *) obj_coff_external_syms (abfd)
986 + (irel->r_symndx
987 * bfd_coff_symesz (abfd))),
988 &sym);
989 if (sym.n_sclass != C_EXT
990 && sym.n_scnum == sec->target_index
991 && ((bfd_vma) sym.n_value <= addr
992 || (bfd_vma) sym.n_value >= toaddr))
993 {
994 bfd_vma val;
995
996 val = bfd_get_32 (abfd, contents + nraddr);
997 val += sym.n_value;
998 if (val >= addr && val < toaddr)
999 bfd_put_32 (abfd, val - count, contents + nraddr);
1000 }
1001 start = stop = addr;
1002 break;
1003
1004 case R_SH_PCDISP8BY2:
1005 off = insn & 0xff;
1006 if (off & 0x80)
1007 off -= 0x100;
1008 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1009 break;
1010
1011 case R_SH_PCDISP:
1012 bfd_coff_swap_sym_in (abfd,
1013 ((bfd_byte *) obj_coff_external_syms (abfd)
1014 + (irel->r_symndx
1015 * bfd_coff_symesz (abfd))),
1016 &sym);
1017 if (sym.n_sclass == C_EXT)
1018 start = stop = addr;
1019 else
1020 {
1021 off = insn & 0xfff;
1022 if (off & 0x800)
1023 off -= 0x1000;
1024 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1025 }
1026 break;
1027
1028 case R_SH_PCRELIMM8BY2:
1029 off = insn & 0xff;
1030 stop = start + 4 + off * 2;
1031 break;
1032
1033 case R_SH_PCRELIMM8BY4:
1034 off = insn & 0xff;
1035 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1036 break;
1037
1038 case R_SH_SWITCH16:
1039 case R_SH_SWITCH32:
1040 /* These reloc types represent
1041 .word L2-L1
1042 The r_offset field holds the difference between the reloc
1043 address and L1. That is the start of the reloc, and
1044 adding in the contents gives us the top. We must adjust
1045 both the r_offset field and the section contents. */
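
/* For example (illustrative only): if the .word is emitted at section
   offset 0x100 and L1 is at offset 0x80, then r_offset is 0x80; the
   code below recovers L1 as 0x100 - 0x80 == 0x80 and adds the 16 or
   32 bit contents (L2-L1) to obtain L2.  */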
1046
1047 start = irel->r_vaddr - sec->vma;
1048 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1049
1050 if (start > addr
1051 && start < toaddr
1052 && (stop <= addr || stop >= toaddr))
1053 irel->r_offset += count;
1054 else if (stop > addr
1055 && stop < toaddr
1056 && (start <= addr || start >= toaddr))
1057 irel->r_offset -= count;
1058
1059 start = stop;
1060
1061 if (irel->r_type == R_SH_SWITCH16)
1062 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1063 else
1064 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1065 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1066
1067 break;
1068
1069 case R_SH_USES:
1070 start = irel->r_vaddr - sec->vma;
1071 stop = (bfd_vma) ((bfd_signed_vma) start
1072 + (long) irel->r_offset
1073 + 4);
1074 break;
1075 }
1076
1077 if (start > addr
1078 && start < toaddr
1079 && (stop <= addr || stop >= toaddr))
1080 adjust = count;
1081 else if (stop > addr
1082 && stop < toaddr
1083 && (start <= addr || start >= toaddr))
1084 adjust = - count;
1085 else
1086 adjust = 0;
1087
1088 if (adjust != 0)
1089 {
1090 oinsn = insn;
1091 overflow = false;
1092 switch (irel->r_type)
1093 {
1094 default:
1095 abort ();
1096 break;
1097
1098 case R_SH_PCDISP8BY2:
1099 case R_SH_PCRELIMM8BY2:
1100 insn += adjust / 2;
1101 if ((oinsn & 0xff00) != (insn & 0xff00))
1102 overflow = true;
1103 bfd_put_16 (abfd, insn, contents + nraddr);
1104 break;
1105
1106 case R_SH_PCDISP:
1107 insn += adjust / 2;
1108 if ((oinsn & 0xf000) != (insn & 0xf000))
1109 overflow = true;
1110 bfd_put_16 (abfd, insn, contents + nraddr);
1111 break;
1112
1113 case R_SH_PCRELIMM8BY4:
1114 BFD_ASSERT (adjust == count || count >= 4);
1115 if (count >= 4)
1116 insn += adjust / 4;
1117 else
1118 {
1119 if ((irel->r_vaddr & 3) == 0)
1120 ++insn;
1121 }
1122 if ((oinsn & 0xff00) != (insn & 0xff00))
1123 overflow = true;
1124 bfd_put_16 (abfd, insn, contents + nraddr);
1125 break;
1126
1127 case R_SH_SWITCH16:
1128 voff += adjust;
1129 if (voff < - 0x8000 || voff >= 0x8000)
1130 overflow = true;
1131 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1132 break;
1133
1134 case R_SH_SWITCH32:
1135 voff += adjust;
1136 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1137 break;
1138
1139 case R_SH_USES:
1140 irel->r_offset += adjust;
1141 break;
1142 }
1143
1144 if (overflow)
1145 {
1146 ((*_bfd_error_handler)
1147 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1148 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1149 bfd_set_error (bfd_error_bad_value);
1150 return false;
1151 }
1152 }
1153
1154 irel->r_vaddr = nraddr + sec->vma;
1155 }
1156
1157 /* Look through all the other sections. If they contain any IMM32
1158 relocs against internal symbols which we are not going to adjust
1159 below, we may need to adjust the addends. */
1160 for (o = abfd->sections; o != NULL; o = o->next)
1161 {
1162 struct internal_reloc *internal_relocs;
1163 struct internal_reloc *irelscan, *irelscanend;
1164 bfd_byte *ocontents;
1165
1166 if (o == sec
1167 || (o->flags & SEC_RELOC) == 0
1168 || o->reloc_count == 0)
1169 continue;
1170
1171 /* We always cache the relocs. Perhaps, if info->keep_memory is
1172 false, we should free them, if we are permitted to, when we
1173 leave sh_relax_section. */
1174 internal_relocs = (_bfd_coff_read_internal_relocs
1175 (abfd, o, true, (bfd_byte *) NULL, false,
1176 (struct internal_reloc *) NULL));
1177 if (internal_relocs == NULL)
1178 return false;
1179
1180 ocontents = NULL;
1181 irelscanend = internal_relocs + o->reloc_count;
1182 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1183 {
1184 struct internal_syment sym;
1185
1186 if (irelscan->r_type != R_SH_IMM32)
1187 continue;
1188
1189 bfd_coff_swap_sym_in (abfd,
1190 ((bfd_byte *) obj_coff_external_syms (abfd)
1191 + (irelscan->r_symndx
1192 * bfd_coff_symesz (abfd))),
1193 &sym);
1194 if (sym.n_sclass != C_EXT
1195 && sym.n_scnum == sec->target_index
1196 && ((bfd_vma) sym.n_value <= addr
1197 || (bfd_vma) sym.n_value >= toaddr))
1198 {
1199 bfd_vma val;
1200
1201 if (ocontents == NULL)
1202 {
1203 if (coff_section_data (abfd, o)->contents != NULL)
1204 ocontents = coff_section_data (abfd, o)->contents;
1205 else
1206 {
1207 /* We always cache the section contents.
1208 Perhaps, if info->keep_memory is false, we
1209 should free them, if we are permitted to,
1210 when we leave sh_relax_section. */
1211 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1212 if (ocontents == NULL)
1213 return false;
1214 if (! bfd_get_section_contents (abfd, o, ocontents,
1215 (file_ptr) 0,
1216 o->_raw_size))
1217 return false;
1218 coff_section_data (abfd, o)->contents = ocontents;
1219 }
1220 }
1221
1222 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1223 val += sym.n_value;
1224 if (val >= addr && val < toaddr)
1225 bfd_put_32 (abfd, val - count,
1226 ocontents + irelscan->r_vaddr - o->vma);
1227
1228 coff_section_data (abfd, o)->keep_contents = true;
1229 }
1230 }
1231 }
1232
1233 /* Adjusting the internal symbols will not work if something has
1234 already retrieved the generic symbols. It would be possible to
1235 make this work by adjusting the generic symbols at the same time.
1236 However, this case should not arise in normal usage. */
1237 if (obj_symbols (abfd) != NULL
1238 || obj_raw_syments (abfd) != NULL)
1239 {
1240 ((*_bfd_error_handler)
1241 ("%s: fatal: generic symbols retrieved before relaxing",
1242 bfd_get_filename (abfd)));
1243 bfd_set_error (bfd_error_invalid_operation);
1244 return false;
1245 }
1246
1247 /* Adjust all the symbols. */
1248 sym_hash = obj_coff_sym_hashes (abfd);
1249 symesz = bfd_coff_symesz (abfd);
1250 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1251 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1252 while (esym < esymend)
1253 {
1254 struct internal_syment isym;
1255
1256 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1257
1258 if (isym.n_scnum == sec->target_index
1259 && (bfd_vma) isym.n_value > addr
1260 && (bfd_vma) isym.n_value < toaddr)
1261 {
1262 isym.n_value -= count;
1263
1264 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1265
1266 if (*sym_hash != NULL)
1267 {
1268 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1269 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1270 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1271 && (*sym_hash)->root.u.def.value < toaddr);
1272 (*sym_hash)->root.u.def.value -= count;
1273 }
1274 }
1275
1276 esym += (isym.n_numaux + 1) * symesz;
1277 sym_hash += isym.n_numaux + 1;
1278 }
1279
1280 /* See if we can move the ALIGN reloc forward. We have adjusted
1281 r_vaddr for it already. */
1282 if (irelalign != NULL)
1283 {
1284 bfd_vma alignaddr;
1285
1286 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1287 1 << irelalign->r_offset);
1288 if (alignaddr != toaddr)
1289 {
1290 /* Tail recursion. */
1291 return sh_relax_delete_bytes (abfd, sec,
1292 irelalign->r_vaddr - sec->vma,
1293 1 << irelalign->r_offset);
1294 }
1295 }
1296
1297 return true;
1298 }
1299 \f
1300 /* This is yet another version of the SH opcode table, used to rapidly
1301 get information about a particular instruction. */
1302
1303 /* The opcode map is represented by an array of these structures. The
1304 array is indexed by the high order four bits in the instruction. */
1305
1306 struct sh_major_opcode
1307 {
1308 /* A pointer to the instruction list. This is an array which
1309 contains all the instructions with this major opcode. */
1310 const struct sh_minor_opcode *minor_opcodes;
1311 /* The number of elements in minor_opcodes. */
1312 unsigned short count;
1313 };
1314
1315 /* This structure holds information for a set of SH opcodes. The
1316 instruction code is anded with the mask value, and the resulting
1317 value is used to search the sorted opcode list. */
1318
1319 struct sh_minor_opcode
1320 {
1321 /* The sorted opcode list. */
1322 const struct sh_opcode *opcodes;
1323 /* The number of elements in opcodes. */
1324 unsigned short count;
1325 /* The mask value to use when searching the opcode list. */
1326 unsigned short mask;
1327 };
1328
1329 /* This structure holds information for an SH instruction. An array
1330 of these structures is sorted in order by opcode. */
1331
1332 struct sh_opcode
1333 {
1334 /* The code for this instruction, after it has been anded with the
1335 mask value in the sh_major_opcode structure. */
1336 unsigned short opcode;
1337 /* Flags for this instruction. */
1338 unsigned short flags;
1339 };
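
/* Lookup example (illustrative only): for the instruction 0x402b
   (jmp @rn), sh_insn_info below starts at sh_opcodes[4].  The first
   minor opcode table for that major opcode has the mask 0xf0ff;
   0x402b & 0xf0ff is 0x402b, which matches the entry
   { 0x402b, BRANCH | DELAY | USES1 }, so that sh_opcode entry is
   returned.  */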
1340
1341 /* Flags which appear in the sh_opcode structure. */
1342
1343 /* This instruction loads a value from memory. */
1344 #define LOAD (0x1)
1345
1346 /* This instruction stores a value to memory. */
1347 #define STORE (0x2)
1348
1349 /* This instruction is a branch. */
1350 #define BRANCH (0x4)
1351
1352 /* This instruction has a delay slot. */
1353 #define DELAY (0x8)
1354
1355 /* This instruction uses the value in the register in the field at
1356 mask 0x0f00 of the instruction. */
1357 #define USES1 (0x10)
1358
1359 /* This instruction uses the value in the register in the field at
1360 mask 0x00f0 of the instruction. */
1361 #define USES2 (0x20)
1362
1363 /* This instruction uses the value in register 0. */
1364 #define USESR0 (0x40)
1365
1366 /* This instruction sets the value in the register in the field at
1367 mask 0x0f00 of the instruction. */
1368 #define SETS1 (0x80)
1369
1370 /* This instruction sets the value in the register in the field at
1371 mask 0x00f0 of the instruction. */
1372 #define SETS2 (0x100)
1373
1374 /* This instruction sets register 0. */
1375 #define SETSR0 (0x200)
1376
1377 /* This instruction sets a special register. */
1378 #define SETSSP (0x400)
1379
1380 /* This instruction uses a special register. */
1381 #define USESSP (0x800)
1382
1383 /* This instruction uses the floating point register in the field at
1384 mask 0x0f00 of the instruction. */
1385 #define USESF1 (0x1000)
1386
1387 /* This instruction uses the floating point register in the field at
1388 mask 0x00f0 of the instruction. */
1389 #define USESF2 (0x2000)
1390
1391 /* This instruction uses floating point register 0. */
1392 #define USESF0 (0x4000)
1393
1394 /* This instruction sets the floating point register in the field at
1395 mask 0x0f00 of the instruction. */
1396 #define SETSF1 (0x8000)
1397
1398 static boolean sh_insn_uses_reg
1399 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1400 static boolean sh_insn_uses_freg
1401 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1402 static boolean sh_insns_conflict
1403 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1404 const struct sh_opcode *));
1405 static boolean sh_load_use
1406 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1407 const struct sh_opcode *));
1408
1409 /* The opcode maps. */
1410
1411 #define MAP(a) a, sizeof a / sizeof a[0]
1412
1413 static const struct sh_opcode sh_opcode00[] =
1414 {
1415 { 0x0008, SETSSP }, /* clrt */
1416 { 0x0009, 0 }, /* nop */
1417 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1418 { 0x0018, SETSSP }, /* sett */
1419 { 0x0019, SETSSP }, /* div0u */
1420 { 0x001b, 0 }, /* sleep */
1421 { 0x0028, SETSSP }, /* clrmac */
1422 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1423 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1424 { 0x0048, SETSSP }, /* clrs */
1425 { 0x0058, SETSSP } /* sets */
1426 };
1427
1428 static const struct sh_opcode sh_opcode01[] =
1429 {
1430 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1431 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1432 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1433 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1434 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1435 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1436 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1437 { 0x0029, SETS1 | USESSP }, /* movt rn */
1438 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1439 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1440 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1441 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1442 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn */
1443 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1444 { 0x0083, LOAD | USES1 }, /* pref @rn */
1445 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1446 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1447 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1448 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1449 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1450 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1451 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1452 };
1453
1454 static const struct sh_opcode sh_opcode02[] =
1455 {
1456 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1457 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1458 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1459 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1460 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1461 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1462 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1463 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1464 };
1465
1466 static const struct sh_minor_opcode sh_opcode0[] =
1467 {
1468 { MAP (sh_opcode00), 0xffff },
1469 { MAP (sh_opcode01), 0xf0ff },
1470 { MAP (sh_opcode02), 0xf00f }
1471 };
1472
1473 static const struct sh_opcode sh_opcode10[] =
1474 {
1475 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1476 };
1477
1478 static const struct sh_minor_opcode sh_opcode1[] =
1479 {
1480 { MAP (sh_opcode10), 0xf000 }
1481 };
1482
1483 static const struct sh_opcode sh_opcode20[] =
1484 {
1485 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1486 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1487 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1488 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1489 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1490 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1491 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1492 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1493 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1494 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1495 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1496 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1497 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1498 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1499 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1500 };
1501
1502 static const struct sh_minor_opcode sh_opcode2[] =
1503 {
1504 { MAP (sh_opcode20), 0xf00f }
1505 };
1506
1507 static const struct sh_opcode sh_opcode30[] =
1508 {
1509 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1510 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1511 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1512 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1513 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1514 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1515 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1516 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1517 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1518 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1519 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1520 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1521 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1522 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1523 };
1524
1525 static const struct sh_minor_opcode sh_opcode3[] =
1526 {
1527 { MAP (sh_opcode30), 0xf00f }
1528 };
1529
1530 static const struct sh_opcode sh_opcode40[] =
1531 {
1532 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1533 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1534 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1535 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1536 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1537 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1538 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1539 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1540 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1541 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1542 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1543 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1544 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1545 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1546 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1547 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1548 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1549 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1550 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1551 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1552 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1553 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1554 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1555 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1556 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1557 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1558 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1559 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1560 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1561 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1562 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1563 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1564 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1565 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1566 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1567 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1568 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1569 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1570 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1571 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1572 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1573 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1574 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1575 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1576 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1577 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1578 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1579 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr,@-rn */
1580 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr */
1581 { 0x406a, SETSSP | USES1 } /* lds rm,fpscr */
1582 };
1583
1584 static const struct sh_opcode sh_opcode41[] =
1585 {
1586 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l rx_bank,@-rn */
1587 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rx_bank */
1588 { 0x408e, SETSSP | USES1 } /* ldc rm,rx_bank */
1589 };
1590
1591 static const struct sh_opcode sh_opcode42[] =
1592 {
1593 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1594 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1595 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1596 };
1597
1598 static const struct sh_minor_opcode sh_opcode4[] =
1599 {
1600 { MAP (sh_opcode40), 0xf0ff },
1601 { MAP (sh_opcode41), 0xf08f },
1602 { MAP (sh_opcode42), 0xf00f }
1603 };
1604
1605 static const struct sh_opcode sh_opcode50[] =
1606 {
1607 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1608 };
1609
1610 static const struct sh_minor_opcode sh_opcode5[] =
1611 {
1612 { MAP (sh_opcode50), 0xf000 }
1613 };
1614
1615 static const struct sh_opcode sh_opcode60[] =
1616 {
1617 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1618 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1619 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1620 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1621 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1622 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1623 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1624 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1625 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1626 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1627 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1628 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1629 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1630 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1631 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1632 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1633 };
1634
1635 static const struct sh_minor_opcode sh_opcode6[] =
1636 {
1637 { MAP (sh_opcode60), 0xf00f }
1638 };
1639
1640 static const struct sh_opcode sh_opcode70[] =
1641 {
1642 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1643 };
1644
1645 static const struct sh_minor_opcode sh_opcode7[] =
1646 {
1647 { MAP (sh_opcode70), 0xf000 }
1648 };
1649
1650 static const struct sh_opcode sh_opcode80[] =
1651 {
1652 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1653 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1654 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1655 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1656 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1657 { 0x8900, BRANCH | USESSP }, /* bt label */
1658 { 0x8b00, BRANCH | USESSP }, /* bf label */
1659 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1660 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1661 };
1662
1663 static const struct sh_minor_opcode sh_opcode8[] =
1664 {
1665 { MAP (sh_opcode80), 0xff00 }
1666 };
1667
1668 static const struct sh_opcode sh_opcode90[] =
1669 {
1670 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1671 };
1672
1673 static const struct sh_minor_opcode sh_opcode9[] =
1674 {
1675 { MAP (sh_opcode90), 0xf000 }
1676 };
1677
1678 static const struct sh_opcode sh_opcodea0[] =
1679 {
1680 { 0xa000, BRANCH | DELAY } /* bra label */
1681 };
1682
1683 static const struct sh_minor_opcode sh_opcodea[] =
1684 {
1685 { MAP (sh_opcodea0), 0xf000 }
1686 };
1687
1688 static const struct sh_opcode sh_opcodeb0[] =
1689 {
1690 { 0xb000, BRANCH | DELAY } /* bsr label */
1691 };
1692
1693 static const struct sh_minor_opcode sh_opcodeb[] =
1694 {
1695 { MAP (sh_opcodeb0), 0xf000 }
1696 };
1697
1698 static const struct sh_opcode sh_opcodec0[] =
1699 {
1700 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1701 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1702 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1703 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1704 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1705 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1706 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1707 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1708 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1709 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1710 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1711 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1712 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1713 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1714 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1715 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1716 };
1717
1718 static const struct sh_minor_opcode sh_opcodec[] =
1719 {
1720 { MAP (sh_opcodec0), 0xff00 }
1721 };
1722
1723 static const struct sh_opcode sh_opcoded0[] =
1724 {
1725 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1726 };
1727
1728 static const struct sh_minor_opcode sh_opcoded[] =
1729 {
1730 { MAP (sh_opcoded0), 0xf000 }
1731 };
1732
1733 static const struct sh_opcode sh_opcodee0[] =
1734 {
1735 { 0xe000, SETS1 } /* mov #imm,rn */
1736 };
1737
1738 static const struct sh_minor_opcode sh_opcodee[] =
1739 {
1740 { MAP (sh_opcodee0), 0xf000 }
1741 };
1742
1743 static const struct sh_opcode sh_opcodef0[] =
1744 {
1745 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1746 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1747 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1748 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1749 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1750 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1751 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1752 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1753 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1754 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1755 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1756 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1757 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1758 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1759 };
1760
1761 static const struct sh_opcode sh_opcodef1[] =
1762 {
1763 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1764 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1765 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1766 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1767 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1768 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1769 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1770 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1771 { 0xf08d, SETSF1 }, /* fldi0 fn */
1772 { 0xf09d, SETSF1 } /* fldi1 fn */
1773 };
1774
1775 static const struct sh_minor_opcode sh_opcodef[] =
1776 {
1777 { MAP (sh_opcodef0), 0xf00f },
1778 { MAP (sh_opcodef1), 0xf0ff }
1779 };
1780
1781 static const struct sh_major_opcode sh_opcodes[] =
1782 {
1783 { MAP (sh_opcode0) },
1784 { MAP (sh_opcode1) },
1785 { MAP (sh_opcode2) },
1786 { MAP (sh_opcode3) },
1787 { MAP (sh_opcode4) },
1788 { MAP (sh_opcode5) },
1789 { MAP (sh_opcode6) },
1790 { MAP (sh_opcode7) },
1791 { MAP (sh_opcode8) },
1792 { MAP (sh_opcode9) },
1793 { MAP (sh_opcodea) },
1794 { MAP (sh_opcodeb) },
1795 { MAP (sh_opcodec) },
1796 { MAP (sh_opcoded) },
1797 { MAP (sh_opcodee) },
1798 { MAP (sh_opcodef) }
1799 };
1800
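/* For illustration (an assumed walk through the tables, not part of
   the original commentary): the instruction 0xc801, "tst #1,r0", has
   top nibble 0xc, so sh_opcodes[0xc] selects the single
   sh_minor_opcode entry built from sh_opcodec0 with mask 0xff00.
   Masking gives 0xc801 & 0xff00 == 0xc800, which matches the entry
   flagged SETSSP | USESR0: the instruction reads r0 and sets the T
   bit, tracked here as part of the "special register" state.  */
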
1801 /* Given an instruction, return a pointer to the corresponding
1802 sh_opcode structure. Return NULL if the instruction is not
1803 recognized. */
1804
1805 static const struct sh_opcode *
1806 sh_insn_info (insn)
1807 unsigned int insn;
1808 {
1809 const struct sh_major_opcode *maj;
1810 const struct sh_minor_opcode *min, *minend;
1811
1812 maj = &sh_opcodes[(insn & 0xf000) >> 12];
1813 min = maj->minor_opcodes;
1814 minend = min + maj->count;
1815 for (; min < minend; min++)
1816 {
1817 unsigned int l;
1818 const struct sh_opcode *op, *opend;
1819
1820 l = insn & min->mask;
1821 op = min->opcodes;
1822 opend = op + min->count;
1823
1824 /* Since the opcode tables are sorted, we could use a binary
1825 search here if the count were above some cutoff value. */
1826 for (; op < opend; op++)
1827 if (op->opcode == l)
1828 return op;
1829 }
1830
1831 return NULL;
1832 }
1833
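/* A minimal usage sketch of sh_insn_info (an illustrative addition,
   kept under "#if 0" so it is never compiled; the helper name and the
   sample offset are made up).  */
#if 0
static void
sh_insn_info_example (bfd *abfd, bfd_byte *contents, bfd_vma off)
{
  unsigned int insn;
  const struct sh_opcode *op;

  /* Fetch one 16 bit instruction and classify it.  For example,
     0x8904 matches the sh_opcode80 entry 0x8900 and comes back
     flagged BRANCH | USESSP ("bt" with an 8 bit displacement).  */
  insn = bfd_get_16 (abfd, contents + off);
  op = sh_insn_info (insn);
  if (op == NULL)
    return;
  if ((op->flags & (LOAD | STORE)) != 0)
    {
      /* A load or store; sh_align_loads below tries to move these
         onto four byte boundaries.  */
    }
}
#endif
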
1834 /* See whether an instruction uses a general purpose register. */
1835
1836 static boolean
1837 sh_insn_uses_reg (insn, op, reg)
1838 unsigned int insn;
1839 const struct sh_opcode *op;
1840 unsigned int reg;
1841 {
1842 unsigned int f;
1843
1844 f = op->flags;
1845
1846 if ((f & USES1) != 0
1847 && ((insn & 0x0f00) >> 8) == reg)
1848 return true;
1849 if ((f & USES2) != 0
1850 && ((insn & 0x00f0) >> 4) == reg)
1851 return true;
1852 if ((f & USESR0) != 0
1853 && reg == 0)
1854 return true;
1855
1856 return false;
1857 }
1858
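/* For illustration (assumed values): the store 0x8032, "mov.b
   r0,@(2,r3)", is flagged USES2 | USESR0 above, so sh_insn_uses_reg
   returns true for reg 3 (taken from bits 4-7 of the instruction) and
   for reg 0, and false for every other register; USES1 would instead
   test bits 8-11.  sh_insn_uses_freg below applies the same scheme to
   the floating point operand fields.  */
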
1859 /* See whether an instruction uses a floating point register. */
1860
1861 static boolean
1862 sh_insn_uses_freg (insn, op, freg)
1863 unsigned int insn;
1864 const struct sh_opcode *op;
1865 unsigned int freg;
1866 {
1867 unsigned int f;
1868
1869 f = op->flags;
1870
1871 if ((f & USESF1) != 0
1872 && ((insn & 0x0f00) >> 8) == freg)
1873 return true;
1874 if ((f & USESF2) != 0
1875 && ((insn & 0x00f0) >> 4) == freg)
1876 return true;
1877 if ((f & USESF0) != 0
1878 && freg == 0)
1879 return true;
1880
1881 return false;
1882 }
1883
1884 /* See whether instructions I1 and I2 conflict, assuming I1 comes
1885 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
1886 This returns true if they conflict, false if they can be swapped safely. */
1887
1888 static boolean
1889 sh_insns_conflict (i1, op1, i2, op2)
1890 unsigned int i1;
1891 const struct sh_opcode *op1;
1892 unsigned int i2;
1893 const struct sh_opcode *op2;
1894 {
1895 unsigned int f1, f2;
1896
1897 f1 = op1->flags;
1898 f2 = op2->flags;
1899
1900 if ((f1 & (BRANCH | DELAY)) != 0
1901 || (f2 & (BRANCH | DELAY)) != 0)
1902 return true;
1903
1904 if ((f1 & SETSSP) != 0 && (f2 & USESSP) != 0)
1905 return true;
1906 if ((f2 & SETSSP) != 0 && (f1 & USESSP) != 0)
1907 return true;
1908
1909 if ((f1 & SETS1) != 0
1910 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
1911 return true;
1912 if ((f1 & SETS2) != 0
1913 && sh_insn_uses_reg (i2, op2, (i1 & 0x00f0) >> 4))
1914 return true;
1915 if ((f1 & SETSR0) != 0
1916 && sh_insn_uses_reg (i2, op2, 0))
1917 return true;
1918 if ((f1 & SETSF1) != 0
1919 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
1920 return true;
1921
1922 if ((f2 & SETS1) != 0
1923 && sh_insn_uses_reg (i1, op1, (i2 & 0x0f00) >> 8))
1924 return true;
1925 if ((f2 & SETS2) != 0
1926 && sh_insn_uses_reg (i1, op1, (i2 & 0x00f0) >> 4))
1927 return true;
1928 if ((f2 & SETSR0) != 0
1929 && sh_insn_uses_reg (i1, op1, 0))
1930 return true;
1931 if ((f2 & SETSF1) != 0
1932 && sh_insn_uses_freg (i1, op1, (i2 & 0x0f00) >> 8))
1933 return true;
1934
1935 /* The instructions do not conflict. */
1936 return false;
1937 }
1938
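/* Two assumed examples of the checks above: "mov #1,r1" (0xe101,
   SETS1) followed by "mov.l r0,@(0,gbr)" (0xc200, STORE | USESR0 |
   USESSP) does not conflict, since neither instruction reads a
   register the other sets, so the pair may be reordered.  "mov #1,r1"
   followed by "mov.w r0,@(0,r1)" (0x8110, STORE | USES2 | USESR0)
   does conflict, because the second instruction uses r1, which the
   first sets.  */
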
1939 /* I1 is a load instruction, and I2 is some other instruction. Return
1940 true if I1 loads a register which I2 uses. */
1941
1942 static boolean
1943 sh_load_use (i1, op1, i2, op2)
1944 unsigned int i1;
1945 const struct sh_opcode *op1;
1946 unsigned int i2;
1947 const struct sh_opcode *op2;
1948 {
1949 unsigned int f1;
1950
1951 f1 = op1->flags;
1952
1953 if ((f1 & LOAD) == 0)
1954 return false;
1955
1956 /* If both SETS1 and SETSSP are set, that means a load to a special
1957 register using postincrement addressing mode, which we don't care
1958 about here. */
1959 if ((f1 & SETS1) != 0
1960 && (f1 & SETSSP) == 0
1961 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
1962 return true;
1963
1964 if ((f1 & SETSR0) != 0
1965 && sh_insn_uses_reg (i2, op2, 0))
1966 return true;
1967
1968 if ((f1 & SETSF1) != 0
1969 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
1970 return true;
1971
1972 return false;
1973 }
1974
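/* An assumed example: "mov.l @(0,gbr),r0" (0xc600, LOAD | SETSR0 |
   USESSP) immediately followed by "tst #1,r0" (0xc801, SETSSP |
   USESR0) makes sh_load_use return true, since the load sets r0 and
   the next instruction reads it; scheduling them back to back would
   stall the pipeline.  */
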
1975 /* Look for loads and stores which we can align to four byte
1976 boundaries. See the longer comment above sh_relax_section for why
1977 this is desirable. This sets *PSWAPPED if some instruction was
1978 swapped. */
1979
1980 static boolean
1981 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
1982 bfd *abfd;
1983 asection *sec;
1984 struct internal_reloc *internal_relocs;
1985 bfd_byte *contents;
1986 boolean *pswapped;
1987 {
1988 struct internal_reloc *irel, *irelend;
1989 bfd_vma *labels = NULL;
1990 bfd_vma *label, *label_end;
1991
1992 *pswapped = false;
1993
1994 irelend = internal_relocs + sec->reloc_count;
1995
1996 /* Get all the addresses with labels on them. */
1997 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
1998 if (labels == NULL)
1999 goto error_return;
2000 label_end = labels;
2001 for (irel = internal_relocs; irel < irelend; irel++)
2002 {
2003 if (irel->r_type == R_SH_LABEL)
2004 {
2005 *label_end = irel->r_vaddr - sec->vma;
2006 ++label_end;
2007 }
2008 }
2009
2010 /* Note that the assembler currently always outputs relocs in
2011 address order. If that ever changes, this code will need to sort
2012 the label values and the relocs. */
2013
2014 label = labels;
2015
2016 for (irel = internal_relocs; irel < irelend; irel++)
2017 {
2018 bfd_vma start, stop, i;
2019
2020 if (irel->r_type != R_SH_CODE)
2021 continue;
2022
2023 start = irel->r_vaddr - sec->vma;
2024
2025 for (irel++; irel < irelend; irel++)
2026 if (irel->r_type == R_SH_DATA)
2027 break;
2028 if (irel < irelend)
2029 stop = irel->r_vaddr - sec->vma;
2030 else
2031 stop = sec->_cooked_size;
2032
2033 /* Instructions should be aligned on 2 byte boundaries. */
2034 if ((start & 1) == 1)
2035 ++start;
2036
2037 /* Now look through the unaligned addresses. */
2038 i = start;
2039 if ((i & 2) == 0)
2040 i += 2;
2041 for (; i < stop; i += 4)
2042 {
2043 unsigned int insn;
2044 const struct sh_opcode *op;
2045 unsigned int prev_insn = 0;
2046 const struct sh_opcode *prev_op = NULL;
2047
2048 insn = bfd_get_16 (abfd, contents + i);
2049 op = sh_insn_info (insn);
2050 if (op == NULL
2051 || (op->flags & (LOAD | STORE)) == 0)
2052 continue;
2053
2054 /* This is a load or store which is not on a four byte
2055 boundary. */
2056
2057 while (label < label_end && *label < i)
2058 ++label;
2059
2060 if (i > start)
2061 {
2062 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2063 prev_op = sh_insn_info (prev_insn);
2064
2065 /* If the load/store instruction is in a delay slot, we
2066 can't swap. */
2067 if (prev_op == NULL
2068 || (prev_op->flags & DELAY) != 0)
2069 continue;
2070 }
2071 if (i > start
2072 && (label >= label_end || *label != i)
2073 && prev_op != NULL
2074 && (prev_op->flags & (LOAD | STORE)) == 0
2075 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2076 {
2077 boolean ok;
2078
2079 /* The load/store instruction does not have a label, and
2080 there is a previous instruction; PREV_INSN is not
2081 itself a load/store instruction, and PREV_INSN and
2082 INSN do not conflict. */
2083
2084 ok = true;
2085
2086 if (i >= start + 4)
2087 {
2088 unsigned int prev2_insn;
2089 const struct sh_opcode *prev2_op;
2090
2091 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2092 prev2_op = sh_insn_info (prev2_insn);
2093
2094 /* If the instruction before PREV_INSN has a delay
2095 slot--that is, PREV_INSN is in a delay slot--we
2096 can not swap. */
2097 if (prev2_op == NULL
2098 || (prev2_op->flags & DELAY) != 0)
2099 ok = false;
2100
2101 /* If the instruction before PREV_INSN is a load,
2102 and it sets a register which INSN uses, then
2103 putting INSN immediately after PREV_INSN will
2104 cause a pipeline bubble, so there is no point to
2105 making the swap. */
2106 if (ok
2107 && (prev2_op->flags & LOAD) != 0
2108 && sh_load_use (prev2_insn, prev2_op, insn, op))
2109 ok = false;
2110 }
2111
2112 if (ok)
2113 {
2114 if (! sh_swap_insns (abfd, sec, internal_relocs,
2115 contents, i - 2))
2116 goto error_return;
2117 *pswapped = true;
2118 continue;
2119 }
2120 }
2121
2122 while (label < label_end && *label < i + 2)
2123 ++label;
2124
2125 if (i + 2 < stop
2126 && (label >= label_end || *label != i + 2))
2127 {
2128 unsigned int next_insn;
2129 const struct sh_opcode *next_op;
2130
2131 /* There is an instruction after the load/store
2132 instruction, and it does not have a label. */
2133 next_insn = bfd_get_16 (abfd, contents + i + 2);
2134 next_op = sh_insn_info (next_insn);
2135 if (next_op != NULL
2136 && (next_op->flags & (LOAD | STORE)) == 0
2137 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2138 {
2139 boolean ok;
2140
2141 /* NEXT_INSN is not itself a load/store instruction,
2142 and it does not conflict with INSN. */
2143
2144 ok = true;
2145
2146 /* If PREV_INSN is a load, and it sets a register
2147 which NEXT_INSN uses, then putting NEXT_INSN
2148 immediately after PREV_INSN will cause a pipeline
2149 bubble, so there is no reason to make this swap. */
2150 if (prev_op != NULL
2151 && (prev_op->flags & LOAD) != 0
2152 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2153 ok = false;
2154
2155 /* If INSN is a load, and it sets a register which
2156 the insn after NEXT_INSN uses, then doing the
2157 swap will cause a pipeline bubble, so there is no
2158 reason to make the swap. However, if the insn
2159 after NEXT_INSN is itself a load or store
2160 instruction, then it is misaligned, so
2161 optimistically hope that it will be swapped
2162 itself, and just live with the pipeline bubble if
2163 it isn't. */
2164 if (ok
2165 && i + 4 < stop
2166 && (op->flags & LOAD) != 0)
2167 {
2168 unsigned int next2_insn;
2169 const struct sh_opcode *next2_op;
2170
2171 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2172 next2_op = sh_insn_info (next2_insn);
2173 if (next2_op != NULL && (next2_op->flags & (LOAD | STORE)) == 0
2174 && sh_load_use (insn, op, next2_insn, next2_op))
2175 ok = false;
2176 }
2177
2178 if (ok)
2179 {
2180 if (! sh_swap_insns (abfd, sec, internal_relocs,
2181 contents, i))
2182 goto error_return;
2183 *pswapped = true;
2184 continue;
2185 }
2186 }
2187 }
2188 }
2189 }
2190
2191 free (labels);
2192
2193 return true;
2194
2195 error_return:
2196 if (labels != NULL)
2197 free (labels);
2198 return false;
2199 }
2200
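/* A sketch of what the loop above does, assuming the offsets shown
   carry no labels:

       off 0x1000: mov #1,r1          (not a load or store)
       off 0x1002: mov.l @(0,gbr),r0  (load, not four byte aligned)

   The load sits two bytes past a four byte boundary, the preceding
   instruction is not itself a load or store, is not in a delay slot,
   and the two do not conflict, so sh_swap_insns is called at offset
   0x1000 and the instructions trade places, leaving the load on a
   four byte boundary.  */
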
2201 /* Swap two SH instructions. */
2202
2203 static boolean
2204 sh_swap_insns (abfd, sec, internal_relocs, contents, addr)
2205 bfd *abfd;
2206 asection *sec;
2207 struct internal_reloc *internal_relocs;
2208 bfd_byte *contents;
2209 bfd_vma addr;
2210 {
2211 unsigned short i1, i2;
2212 struct internal_reloc *irel, *irelend;
2213
2214 /* Swap the instructions themselves. */
2215 i1 = bfd_get_16 (abfd, contents + addr);
2216 i2 = bfd_get_16 (abfd, contents + addr + 2);
2217 bfd_put_16 (abfd, i2, contents + addr);
2218 bfd_put_16 (abfd, i1, contents + addr + 2);
2219
2220 /* Adjust all reloc addresses. */
2221 irelend = internal_relocs + sec->reloc_count;
2222 for (irel = internal_relocs; irel < irelend; irel++)
2223 {
2224 int type, add;
2225
2226 /* There are a few special types of relocs that we don't want to
2227 adjust. These relocs do not apply to the instruction itself,
2228 but are only associated with the address. */
2229 type = irel->r_type;
2230 if (type == R_SH_ALIGN
2231 || type == R_SH_CODE
2232 || type == R_SH_DATA
2233 || type == R_SH_LABEL)
2234 continue;
2235
2236 /* If an R_SH_USES reloc points to one of the addresses being
2237 swapped, we must adjust it. It would be incorrect to do this
2238 for a jump, though, since we want to execute both
2239 instructions after the jump. (We have avoided swapping
2240 around a label, so the jump will not wind up executing an
2241 instruction it shouldn't). */
2242 if (type == R_SH_USES)
2243 {
2244 bfd_vma off;
2245
2246 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2247 if (off == addr)
2248 irel->r_offset += 2;
2249 else if (off == addr + 2)
2250 irel->r_offset -= 2;
2251 }
2252
2253 if (irel->r_vaddr - sec->vma == addr)
2254 {
2255 irel->r_vaddr += 2;
2256 add = -2;
2257 }
2258 else if (irel->r_vaddr - sec->vma == addr + 2)
2259 {
2260 irel->r_vaddr -= 2;
2261 add = 2;
2262 }
2263 else
2264 add = 0;
2265
2266 if (add != 0)
2267 {
2268 bfd_byte *loc;
2269 unsigned short insn, oinsn;
2270 boolean overflow;
2271
2272 loc = contents + irel->r_vaddr - sec->vma;
2273 overflow = false;
2274 switch (type)
2275 {
2276 default:
2277 break;
2278
2279 case R_SH_PCDISP8BY2:
2280 case R_SH_PCRELIMM8BY2:
2281 insn = bfd_get_16 (abfd, loc);
2282 oinsn = insn;
2283 insn += add / 2;
2284 if ((oinsn & 0xff00) != (insn & 0xff00))
2285 overflow = true;
2286 bfd_put_16 (abfd, insn, loc);
2287 break;
2288
2289 case R_SH_PCDISP:
2290 insn = bfd_get_16 (abfd, loc);
2291 oinsn = insn;
2292 insn += add / 2;
2293 if ((oinsn & 0xf000) != (insn & 0xf000))
2294 overflow = true;
2295 bfd_put_16 (abfd, insn, loc);
2296 break;
2297
2298 case R_SH_PCRELIMM8BY4:
2299 /* This reloc ignores the least significant two bits of
2300 the program counter before adding in the offset.
2301 This means that if ADDR is four byte aligned, the
2302 swap will not affect the offset. If ADDR is not four
2303 byte aligned, the instruction crosses a four byte
2304 boundary, and the displacement must be adjusted. */
2305 if ((addr & 3) != 0)
2306 {
2307 insn = bfd_get_16 (abfd, loc);
2308 oinsn = insn;
2309 insn += add / 2;
2310 if ((oinsn & 0xff00) != (insn & 0xff00))
2311 overflow = true;
2312 bfd_put_16 (abfd, insn, loc);
2313 }
2314
2315 break;
2316 }
2317
2318 if (overflow)
2319 {
2320 ((*_bfd_error_handler)
2321 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2322 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2323 bfd_set_error (bfd_error_bad_value);
2324 return false;
2325 }
2326 }
2327 }
2328
2329 return true;
2330 }
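
/* A worked example of the displacement fixup above (illustrative
   values): suppose a "bt" instruction carrying an R_SH_PCDISP8BY2
   reloc sits at ADDR and the swap moves it two bytes later.  Its
   target has not moved, but the instruction is now two bytes closer
   to it, so the byte displacement shrinks by 2 and the encoded
   displacement, counted in two byte units, shrinks by 1; that is
   "insn += add / 2" with add == -2.  The overflow test then verifies
   that the adjusted value still fits in the 8 bit (or, for
   R_SH_PCDISP, 12 bit) displacement field.  */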
2331 \f
2332 /* This is a modification of _bfd_coff_generic_relocate_section, which
2333 will handle SH relaxing. */
2334
2335 static boolean
2336 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2337 relocs, syms, sections)
2338 bfd *output_bfd;
2339 struct bfd_link_info *info;
2340 bfd *input_bfd;
2341 asection *input_section;
2342 bfd_byte *contents;
2343 struct internal_reloc *relocs;
2344 struct internal_syment *syms;
2345 asection **sections;
2346 {
2347 struct internal_reloc *rel;
2348 struct internal_reloc *relend;
2349
2350 rel = relocs;
2351 relend = rel + input_section->reloc_count;
2352 for (; rel < relend; rel++)
2353 {
2354 long symndx;
2355 struct coff_link_hash_entry *h;
2356 struct internal_syment *sym;
2357 bfd_vma addend;
2358 bfd_vma val;
2359 reloc_howto_type *howto;
2360 bfd_reloc_status_type rstat;
2361
2362 /* Almost all relocs have to do with relaxing. If any work must
2363 be done for them, it has been done in sh_relax_section. */
2364 if (rel->r_type != R_SH_IMM32
2365 && rel->r_type != R_SH_PCDISP)
2366 continue;
2367
2368 symndx = rel->r_symndx;
2369
2370 if (symndx == -1)
2371 {
2372 h = NULL;
2373 sym = NULL;
2374 }
2375 else
2376 {
2377 h = obj_coff_sym_hashes (input_bfd)[symndx];
2378 sym = syms + symndx;
2379 }
2380
2381 if (sym != NULL && sym->n_scnum != 0)
2382 addend = - sym->n_value;
2383 else
2384 addend = 0;
2385
2386 if (rel->r_type == R_SH_PCDISP)
2387 addend -= 4;
2388
2389 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2390 howto = NULL;
2391 else
2392 howto = &sh_coff_howtos[rel->r_type];
2393
2394 if (howto == NULL)
2395 {
2396 bfd_set_error (bfd_error_bad_value);
2397 return false;
2398 }
2399
2400 val = 0;
2401
2402 if (h == NULL)
2403 {
2404 asection *sec;
2405
2406 /* There is nothing to do for an internal PCDISP reloc. */
2407 if (rel->r_type == R_SH_PCDISP)
2408 continue;
2409
2410 if (symndx == -1)
2411 {
2412 sec = bfd_abs_section_ptr;
2413 val = 0;
2414 }
2415 else
2416 {
2417 sec = sections[symndx];
2418 val = (sec->output_section->vma
2419 + sec->output_offset
2420 + sym->n_value
2421 - sec->vma);
2422 }
2423 }
2424 else
2425 {
2426 if (h->root.type == bfd_link_hash_defined
2427 || h->root.type == bfd_link_hash_defweak)
2428 {
2429 asection *sec;
2430
2431 sec = h->root.u.def.section;
2432 val = (h->root.u.def.value
2433 + sec->output_section->vma
2434 + sec->output_offset);
2435 }
2436 else if (! info->relocateable)
2437 {
2438 if (! ((*info->callbacks->undefined_symbol)
2439 (info, h->root.root.string, input_bfd, input_section,
2440 rel->r_vaddr - input_section->vma)))
2441 return false;
2442 }
2443 }
2444
2445 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2446 contents,
2447 rel->r_vaddr - input_section->vma,
2448 val, addend);
2449
2450 switch (rstat)
2451 {
2452 default:
2453 abort ();
2454 case bfd_reloc_ok:
2455 break;
2456 case bfd_reloc_overflow:
2457 {
2458 const char *name;
2459 char buf[SYMNMLEN + 1];
2460
2461 if (symndx == -1)
2462 name = "*ABS*";
2463 else if (h != NULL)
2464 name = h->root.root.string;
2465 else if (sym->_n._n_n._n_zeroes == 0
2466 && sym->_n._n_n._n_offset != 0)
2467 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2468 else
2469 {
2470 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2471 buf[SYMNMLEN] = '\0';
2472 name = buf;
2473 }
2474
2475 if (! ((*info->callbacks->reloc_overflow)
2476 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2477 input_section, rel->r_vaddr - input_section->vma)))
2478 return false;
2479 }
2480 }
2481 }
2482
2483 return true;
2484 }
2485
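/* An assumed numeric example of the values handed to
   _bfd_final_link_relocate above: for an R_SH_IMM32 reloc against a
   local symbol with n_value 0x20, in a section whose input vma is 0
   and which lands at output_section->vma + output_offset == 0x400010,
   the loop computes addend = -0x20 and val = 0x400010 + 0x20 - 0 =
   0x400030.  val + addend is therefore 0x400010, the amount by which
   every address in the section moved, and that is the adjustment
   applied to the 32 bit field already stored in the section
   contents.  */
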
2486 /* This is a version of bfd_generic_get_relocated_section_contents
2487 which uses sh_relocate_section. */
2488
2489 static bfd_byte *
2490 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2491 data, relocateable, symbols)
2492 bfd *output_bfd;
2493 struct bfd_link_info *link_info;
2494 struct bfd_link_order *link_order;
2495 bfd_byte *data;
2496 boolean relocateable;
2497 asymbol **symbols;
2498 {
2499 asection *input_section = link_order->u.indirect.section;
2500 bfd *input_bfd = input_section->owner;
2501 asection **sections = NULL;
2502 struct internal_reloc *internal_relocs = NULL;
2503 struct internal_syment *internal_syms = NULL;
2504
2505 /* We only need to handle the case of relaxing, or of having a
2506 particular set of section contents, specially. */
2507 if (relocateable
2508 || coff_section_data (input_bfd, input_section) == NULL
2509 || coff_section_data (input_bfd, input_section)->contents == NULL)
2510 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2511 link_order, data,
2512 relocateable,
2513 symbols);
2514
2515 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2516 input_section->_raw_size);
2517
2518 if ((input_section->flags & SEC_RELOC) != 0
2519 && input_section->reloc_count > 0)
2520 {
2521 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2522 bfd_byte *esym, *esymend;
2523 struct internal_syment *isymp;
2524 asection **secpp;
2525
2526 if (! _bfd_coff_get_external_symbols (input_bfd))
2527 goto error_return;
2528
2529 internal_relocs = (_bfd_coff_read_internal_relocs
2530 (input_bfd, input_section, false, (bfd_byte *) NULL,
2531 false, (struct internal_reloc *) NULL));
2532 if (internal_relocs == NULL)
2533 goto error_return;
2534
2535 internal_syms = ((struct internal_syment *)
2536 bfd_malloc (obj_raw_syment_count (input_bfd)
2537 * sizeof (struct internal_syment)));
2538 if (internal_syms == NULL)
2539 goto error_return;
2540
2541 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
2542 * sizeof (asection *));
2543 if (sections == NULL)
2544 goto error_return;
2545
2546 isymp = internal_syms;
2547 secpp = sections;
2548 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2549 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2550 while (esym < esymend)
2551 {
2552 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2553
2554 if (isymp->n_scnum != 0)
2555 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2556 else
2557 {
2558 if (isymp->n_value == 0)
2559 *secpp = bfd_und_section_ptr;
2560 else
2561 *secpp = bfd_com_section_ptr;
2562 }
2563
2564 esym += (isymp->n_numaux + 1) * symesz;
2565 secpp += isymp->n_numaux + 1;
2566 isymp += isymp->n_numaux + 1;
2567 }
2568
2569 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2570 input_section, data, internal_relocs,
2571 internal_syms, sections))
2572 goto error_return;
2573
2574 free (sections);
2575 sections = NULL;
2576 free (internal_syms);
2577 internal_syms = NULL;
2578 free (internal_relocs);
2579 internal_relocs = NULL;
2580 }
2581
2582 return data;
2583
2584 error_return:
2585 if (internal_relocs != NULL)
2586 free (internal_relocs);
2587 if (internal_syms != NULL)
2588 free (internal_syms);
2589 if (sections != NULL)
2590 free (sections);
2591 return NULL;
2592 }
2593
2594 /* The target vectors. */
2595
2596 const bfd_target shcoff_vec =
2597 {
2598 "coff-sh", /* name */
2599 bfd_target_coff_flavour,
2600 BFD_ENDIAN_BIG, /* data byte order is big */
2601 BFD_ENDIAN_BIG, /* header byte order is big */
2602
2603 (HAS_RELOC | EXEC_P | /* object flags */
2604 HAS_LINENO | HAS_DEBUG |
2605 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2606
2607 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2608 '_', /* leading symbol underscore */
2609 '/', /* ar_pad_char */
2610 15, /* ar_max_namelen */
2611 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2612 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2613 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
2614 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
2615 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
2616 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
2617
2618 {_bfd_dummy_target, coff_object_p, /* bfd_check_format */
2619 bfd_generic_archive_p, _bfd_dummy_target},
2620 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2621 bfd_false},
2622 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2623 _bfd_write_archive_contents, bfd_false},
2624
2625 BFD_JUMP_TABLE_GENERIC (coff),
2626 BFD_JUMP_TABLE_COPY (coff),
2627 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2628 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2629 BFD_JUMP_TABLE_SYMBOLS (coff),
2630 BFD_JUMP_TABLE_RELOCS (coff),
2631 BFD_JUMP_TABLE_WRITE (coff),
2632 BFD_JUMP_TABLE_LINK (coff),
2633 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2634
2635 COFF_SWAP_TABLE,
2636 };
2637
2638 const bfd_target shlcoff_vec =
2639 {
2640 "coff-shl", /* name */
2641 bfd_target_coff_flavour,
2642 BFD_ENDIAN_LITTLE, /* data byte order is little */
2643 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
2644
2645 (HAS_RELOC | EXEC_P | /* object flags */
2646 HAS_LINENO | HAS_DEBUG |
2647 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
2648
2649 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
2650 '_', /* leading symbol underscore */
2651 '/', /* ar_pad_char */
2652 15, /* ar_max_namelen */
2653 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2654 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2655 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
2656 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
2657 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
2658 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
2659
2660 {_bfd_dummy_target, coff_object_p, /* bfd_check_format */
2661 bfd_generic_archive_p, _bfd_dummy_target},
2662 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
2663 bfd_false},
2664 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
2665 _bfd_write_archive_contents, bfd_false},
2666
2667 BFD_JUMP_TABLE_GENERIC (coff),
2668 BFD_JUMP_TABLE_COPY (coff),
2669 BFD_JUMP_TABLE_CORE (_bfd_nocore),
2670 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
2671 BFD_JUMP_TABLE_SYMBOLS (coff),
2672 BFD_JUMP_TABLE_RELOCS (coff),
2673 BFD_JUMP_TABLE_WRITE (coff),
2674 BFD_JUMP_TABLE_LINK (coff),
2675 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
2676
2677 COFF_SWAP_TABLE,
2678 };