bfd/coff-sh.c: Add WinCE support.
1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 94, 95, 96, 97, 98, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Cygnus Support.
4 Written by Steve Chamberlain, <sac@cygnus.com>.
5 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6
7 This file is part of BFD, the Binary File Descriptor library.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
22
23 #include "bfd.h"
24 #include "sysdep.h"
25 #include "libbfd.h"
26 #include "bfdlink.h"
27 #include "coff/sh.h"
28 #include "coff/internal.h"
29
30 #ifdef COFF_WITH_PE
31 #include "coff/pe.h"
32 #endif
33
34 #include "libcoff.h"
35
36 /* Internal functions. */
37 static bfd_reloc_status_type sh_reloc
38 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
39 static long get_symbol_value PARAMS ((asymbol *));
40 static boolean sh_relax_section
41 PARAMS ((bfd *, asection *, struct bfd_link_info *, boolean *));
42 static boolean sh_relax_delete_bytes
43 PARAMS ((bfd *, asection *, bfd_vma, int));
44 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
45 static boolean sh_align_loads
46 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *, boolean *));
47 static boolean sh_swap_insns
48 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
49 static boolean sh_relocate_section
50 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
51 struct internal_reloc *, struct internal_syment *, asection **));
52 static bfd_byte *sh_coff_get_relocated_section_contents
53 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
54 bfd_byte *, boolean, asymbol **));
55
56 #ifdef COFF_WITH_PE
57 /* Can't build import tables with 2**4 alignment. */
58 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
59 #else
60 /* Default section alignment to 2**4. */
61 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
62 #endif
63
64 #ifdef COFF_IMAGE_WITH_PE
65 /* Align PE executables. */
66 #define COFF_PAGE_SIZE 0x1000
67 #endif
68
69 /* Generate long file names. */
70 #define COFF_LONG_FILENAMES
71
72 #ifdef COFF_WITH_PE
73 /* Return true if this relocation should
74 appear in the output .reloc section. */
75 static boolean in_reloc_p (abfd, howto)
76 bfd * abfd ATTRIBUTE_UNUSED;
77 reloc_howto_type * howto;
78 {
79 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
80 }
81 #endif
82
83 /* The supported relocations. There are a lot of relocations defined
84 in coff/internal.h which we do not expect to ever see. */
85 static reloc_howto_type sh_coff_howtos[] =
86 {
87 EMPTY_HOWTO (0),
88 EMPTY_HOWTO (1),
89 #ifdef COFF_WITH_PE
90 /* Windows CE */
91 HOWTO (R_SH_IMM32CE, /* type */
92 0, /* rightshift */
93 2, /* size (0 = byte, 1 = short, 2 = long) */
94 32, /* bitsize */
95 false, /* pc_relative */
96 0, /* bitpos */
97 complain_overflow_bitfield, /* complain_on_overflow */
98 sh_reloc, /* special_function */
99 "r_imm32ce", /* name */
100 true, /* partial_inplace */
101 0xffffffff, /* src_mask */
102 0xffffffff, /* dst_mask */
103 false), /* pcrel_offset */
104 #else
105 EMPTY_HOWTO (2),
106 #endif
107 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
108 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
109 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
110 EMPTY_HOWTO (6), /* R_SH_IMM24 */
111 EMPTY_HOWTO (7), /* R_SH_LOW16 */
112 EMPTY_HOWTO (8),
113 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
114
115 HOWTO (R_SH_PCDISP8BY2, /* type */
116 1, /* rightshift */
117 1, /* size (0 = byte, 1 = short, 2 = long) */
118 8, /* bitsize */
119 true, /* pc_relative */
120 0, /* bitpos */
121 complain_overflow_signed, /* complain_on_overflow */
122 sh_reloc, /* special_function */
123 "r_pcdisp8by2", /* name */
124 true, /* partial_inplace */
125 0xff, /* src_mask */
126 0xff, /* dst_mask */
127 true), /* pcrel_offset */
128
129 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
130
131 HOWTO (R_SH_PCDISP, /* type */
132 1, /* rightshift */
133 1, /* size (0 = byte, 1 = short, 2 = long) */
134 12, /* bitsize */
135 true, /* pc_relative */
136 0, /* bitpos */
137 complain_overflow_signed, /* complain_on_overflow */
138 sh_reloc, /* special_function */
139 "r_pcdisp12by2", /* name */
140 true, /* partial_inplace */
141 0xfff, /* src_mask */
142 0xfff, /* dst_mask */
143 true), /* pcrel_offset */
144
145 EMPTY_HOWTO (13),
146
147 HOWTO (R_SH_IMM32, /* type */
148 0, /* rightshift */
149 2, /* size (0 = byte, 1 = short, 2 = long) */
150 32, /* bitsize */
151 false, /* pc_relative */
152 0, /* bitpos */
153 complain_overflow_bitfield, /* complain_on_overflow */
154 sh_reloc, /* special_function */
155 "r_imm32", /* name */
156 true, /* partial_inplace */
157 0xffffffff, /* src_mask */
158 0xffffffff, /* dst_mask */
159 false), /* pcrel_offset */
160
161 EMPTY_HOWTO (15),
162 #ifdef COFF_WITH_PE
163 HOWTO (R_SH_IMAGEBASE, /* type */
164 0, /* rightshift */
165 2, /* size (0 = byte, 1 = short, 2 = long) */
166 32, /* bitsize */
167 false, /* pc_relative */
168 0, /* bitpos */
169 complain_overflow_bitfield, /* complain_on_overflow */
170 sh_reloc, /* special_function */
171 "rva32", /* name */
172 true, /* partial_inplace */
173 0xffffffff, /* src_mask */
174 0xffffffff, /* dst_mask */
175 false), /* pcrel_offset */
176 #else
177 EMPTY_HOWTO (16), /* R_SH_IMM8 */
178 #endif
179 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
180 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
181 EMPTY_HOWTO (19), /* R_SH_IMM4 */
182 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
183 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
184
185 HOWTO (R_SH_PCRELIMM8BY2, /* type */
186 1, /* rightshift */
187 1, /* size (0 = byte, 1 = short, 2 = long) */
188 8, /* bitsize */
189 true, /* pc_relative */
190 0, /* bitpos */
191 complain_overflow_unsigned, /* complain_on_overflow */
192 sh_reloc, /* special_function */
193 "r_pcrelimm8by2", /* name */
194 true, /* partial_inplace */
195 0xff, /* src_mask */
196 0xff, /* dst_mask */
197 true), /* pcrel_offset */
198
199 HOWTO (R_SH_PCRELIMM8BY4, /* type */
200 2, /* rightshift */
201 1, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 true, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_unsigned, /* complain_on_overflow */
206 sh_reloc, /* special_function */
207 "r_pcrelimm8by4", /* name */
208 true, /* partial_inplace */
209 0xff, /* src_mask */
210 0xff, /* dst_mask */
211 true), /* pcrel_offset */
212
213 HOWTO (R_SH_IMM16, /* type */
214 0, /* rightshift */
215 1, /* size (0 = byte, 1 = short, 2 = long) */
216 16, /* bitsize */
217 false, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_bitfield, /* complain_on_overflow */
220 sh_reloc, /* special_function */
221 "r_imm16", /* name */
222 true, /* partial_inplace */
223 0xffff, /* src_mask */
224 0xffff, /* dst_mask */
225 false), /* pcrel_offset */
226
227 HOWTO (R_SH_SWITCH16, /* type */
228 0, /* rightshift */
229 1, /* size (0 = byte, 1 = short, 2 = long) */
230 16, /* bitsize */
231 false, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_bitfield, /* complain_on_overflow */
234 sh_reloc, /* special_function */
235 "r_switch16", /* name */
236 true, /* partial_inplace */
237 0xffff, /* src_mask */
238 0xffff, /* dst_mask */
239 false), /* pcrel_offset */
240
241 HOWTO (R_SH_SWITCH32, /* type */
242 0, /* rightshift */
243 2, /* size (0 = byte, 1 = short, 2 = long) */
244 32, /* bitsize */
245 false, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_bitfield, /* complain_on_overflow */
248 sh_reloc, /* special_function */
249 "r_switch32", /* name */
250 true, /* partial_inplace */
251 0xffffffff, /* src_mask */
252 0xffffffff, /* dst_mask */
253 false), /* pcrel_offset */
254
255 HOWTO (R_SH_USES, /* type */
256 0, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 16, /* bitsize */
259 false, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_bitfield, /* complain_on_overflow */
262 sh_reloc, /* special_function */
263 "r_uses", /* name */
264 true, /* partial_inplace */
265 0xffff, /* src_mask */
266 0xffff, /* dst_mask */
267 false), /* pcrel_offset */
268
269 HOWTO (R_SH_COUNT, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 false, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield, /* complain_on_overflow */
276 sh_reloc, /* special_function */
277 "r_count", /* name */
278 true, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
282
283 HOWTO (R_SH_ALIGN, /* type */
284 0, /* rightshift */
285 2, /* size (0 = byte, 1 = short, 2 = long) */
286 32, /* bitsize */
287 false, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_bitfield, /* complain_on_overflow */
290 sh_reloc, /* special_function */
291 "r_align", /* name */
292 true, /* partial_inplace */
293 0xffffffff, /* src_mask */
294 0xffffffff, /* dst_mask */
295 false), /* pcrel_offset */
296
297 HOWTO (R_SH_CODE, /* type */
298 0, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 32, /* bitsize */
301 false, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_bitfield, /* complain_on_overflow */
304 sh_reloc, /* special_function */
305 "r_code", /* name */
306 true, /* partial_inplace */
307 0xffffffff, /* src_mask */
308 0xffffffff, /* dst_mask */
309 false), /* pcrel_offset */
310
311 HOWTO (R_SH_DATA, /* type */
312 0, /* rightshift */
313 2, /* size (0 = byte, 1 = short, 2 = long) */
314 32, /* bitsize */
315 false, /* pc_relative */
316 0, /* bitpos */
317 complain_overflow_bitfield, /* complain_on_overflow */
318 sh_reloc, /* special_function */
319 "r_data", /* name */
320 true, /* partial_inplace */
321 0xffffffff, /* src_mask */
322 0xffffffff, /* dst_mask */
323 false), /* pcrel_offset */
324
325 HOWTO (R_SH_LABEL, /* type */
326 0, /* rightshift */
327 2, /* size (0 = byte, 1 = short, 2 = long) */
328 32, /* bitsize */
329 false, /* pc_relative */
330 0, /* bitpos */
331 complain_overflow_bitfield, /* complain_on_overflow */
332 sh_reloc, /* special_function */
333 "r_label", /* name */
334 true, /* partial_inplace */
335 0xffffffff, /* src_mask */
336 0xffffffff, /* dst_mask */
337 false), /* pcrel_offset */
338
339 HOWTO (R_SH_SWITCH8, /* type */
340 0, /* rightshift */
341 0, /* size (0 = byte, 1 = short, 2 = long) */
342 8, /* bitsize */
343 false, /* pc_relative */
344 0, /* bitpos */
345 complain_overflow_bitfield, /* complain_on_overflow */
346 sh_reloc, /* special_function */
347 "r_switch8", /* name */
348 true, /* partial_inplace */
349 0xff, /* src_mask */
350 0xff, /* dst_mask */
351 false) /* pcrel_offset */
352 };
353
354 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
355
356 /* Check for a bad magic number. */
357 #define BADMAG(x) SHBADMAG(x)
358
359 /* Customize coffcode.h (this is not currently used). */
360 #define SH 1
361
362 /* FIXME: This should not be set here. */
363 #define __A_MAGIC_SET__
364
365 #ifndef COFF_WITH_PE
366 /* Swap the r_offset field in and out. */
367 #define SWAP_IN_RELOC_OFFSET bfd_h_get_32
368 #define SWAP_OUT_RELOC_OFFSET bfd_h_put_32
369
370 /* Swap out extra information in the reloc structure. */
371 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
372 do \
373 { \
374 dst->r_stuff[0] = 'S'; \
375 dst->r_stuff[1] = 'C'; \
376 } \
377 while (0)
378 #endif
379
380 /* Get the value of a symbol, when performing a relocation. */
381
382 static long
383 get_symbol_value (symbol)
384 asymbol *symbol;
385 {
386 bfd_vma relocation;
387
388 if (bfd_is_com_section (symbol->section))
389 relocation = 0;
390 else
391 relocation = (symbol->value +
392 symbol->section->output_section->vma +
393 symbol->section->output_offset);
394
395 return relocation;
396 }
397
398 #ifdef COFF_WITH_PE
399 /* Convert an rtype to howto for the COFF backend linker.
400 Copied from coff-i386. */
401 #define coff_rtype_to_howto coff_sh_rtype_to_howto
402
403 static reloc_howto_type *
404 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
405 bfd * abfd;
406 asection * sec;
407 struct internal_reloc * rel;
408 struct coff_link_hash_entry * h;
409 struct internal_syment * sym;
410 bfd_vma * addendp;
411 {
412 reloc_howto_type * howto;
413
414 howto = sh_coff_howtos + rel->r_type;
415
416 *addendp = 0;
417
418 if (howto->pc_relative)
419 *addendp += sec->vma;
420
421 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
422 {
423 /* This is a common symbol. The section contents include the
424 size (sym->n_value) as an addend. The relocate_section
425 function will be adding in the final value of the symbol. We
426 need to subtract out the current size in order to get the
427 correct result. */
428 BFD_ASSERT (h != NULL);
429 }
430
431 if (howto->pc_relative)
432 {
433 *addendp -= 4;
434
435 /* If the symbol is defined, then the generic code is going to
436 add back the symbol value in order to cancel out an
437 adjustment it made to the addend. However, we set the addend
438 to 0 at the start of this function. We need to adjust here,
439 to avoid the adjustment the generic code will make. FIXME:
440 This is getting a bit hackish. */
441 if (sym != NULL && sym->n_scnum != 0)
442 *addendp -= sym->n_value;
443 }
444
445 if (rel->r_type == R_SH_IMAGEBASE)
446 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
447
448 return howto;
449 }
450
451 /* This structure is used to map BFD reloc codes to SH PE relocs. */
452 struct shcoff_reloc_map
453 {
454 unsigned char bfd_reloc_val;
455 unsigned char shcoff_reloc_val;
456 };
457
458 /* An array mapping BFD reloc codes to SH PE relocs. */
459 static const struct shcoff_reloc_map sh_reloc_map[] =
460 {
461 { BFD_RELOC_32, R_SH_IMM32CE },
462 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
463 { BFD_RELOC_CTOR, R_SH_IMM32CE },
464 };
465
466 /* Given a BFD reloc code, return the howto structure for the
467 corresponding SH PE reloc. */
468 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
469
470 static reloc_howto_type *
471 sh_coff_reloc_type_lookup (abfd, code)
472 bfd * abfd ATTRIBUTE_UNUSED;
473 bfd_reloc_code_real_type code;
474 {
475 unsigned int i;
476
477 for (i = 0; i < sizeof (sh_reloc_map) / sizeof (struct shcoff_reloc_map); i++)
478 {
479 if (sh_reloc_map[i].bfd_reloc_val == code)
480 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
481 }
482
483 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
484 return NULL;
485 }
486 #endif /* COFF_WITH_PE */
487
488 /* This macro is used in coffcode.h to get the howto corresponding to
489 an internal reloc. */
490
491 #define RTYPE2HOWTO(relent, internal) \
492 ((relent)->howto = \
493 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
494 ? &sh_coff_howtos[(internal)->r_type] \
495 : (reloc_howto_type *) NULL))
496
497 /* This is the same as the macro in coffcode.h, except that it copies
498 r_offset into reloc_entry->addend for some relocs. */
499 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
500 { \
501 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
502 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
503 coffsym = (obj_symbols (abfd) \
504 + (cache_ptr->sym_ptr_ptr - symbols)); \
505 else if (ptr) \
506 coffsym = coff_symbol_from (abfd, ptr); \
507 if (coffsym != (coff_symbol_type *) NULL \
508 && coffsym->native->u.syment.n_scnum == 0) \
509 cache_ptr->addend = 0; \
510 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
511 && ptr->section != (asection *) NULL) \
512 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
513 else \
514 cache_ptr->addend = 0; \
515 if ((reloc).r_type == R_SH_SWITCH8 \
516 || (reloc).r_type == R_SH_SWITCH16 \
517 || (reloc).r_type == R_SH_SWITCH32 \
518 || (reloc).r_type == R_SH_USES \
519 || (reloc).r_type == R_SH_COUNT \
520 || (reloc).r_type == R_SH_ALIGN) \
521 cache_ptr->addend = (reloc).r_offset; \
522 }
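/* Note that the SWITCH, USES, COUNT and ALIGN relocs keep their
   interesting value in the r_offset field rather than in the section
   contents, which is why r_offset is copied into the addend here; see
   the discussion of R_SH_USES and R_SH_COUNT before sh_relax_section
   below.  */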
523
524 /* This is the howto function for the SH relocations. */
525
526 static bfd_reloc_status_type
527 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
528 error_message)
529 bfd *abfd;
530 arelent *reloc_entry;
531 asymbol *symbol_in;
532 PTR data;
533 asection *input_section;
534 bfd *output_bfd;
535 char **error_message ATTRIBUTE_UNUSED;
536 {
537 unsigned long insn;
538 bfd_vma sym_value;
539 unsigned short r_type;
540 bfd_vma addr = reloc_entry->address;
541 bfd_byte *hit_data = addr + (bfd_byte *) data;
542
543 r_type = reloc_entry->howto->type;
544
545 if (output_bfd != NULL)
546 {
547 /* Partial linking--do nothing. */
548 reloc_entry->address += input_section->output_offset;
549 return bfd_reloc_ok;
550 }
551
552 /* Almost all relocs have to do with relaxing. If any work must be
553 done for them, it has been done in sh_relax_section. */
554 if (r_type != R_SH_IMM32
555 #ifdef COFF_WITH_PE
556 && r_type != R_SH_IMM32CE
557 && r_type != R_SH_IMAGEBASE
558 #endif
559 && (r_type != R_SH_PCDISP
560 || (symbol_in->flags & BSF_LOCAL) != 0))
561 return bfd_reloc_ok;
562
563 if (symbol_in != NULL
564 && bfd_is_und_section (symbol_in->section))
565 return bfd_reloc_undefined;
566
567 sym_value = get_symbol_value (symbol_in);
568
569 switch (r_type)
570 {
571 case R_SH_IMM32:
572 #ifdef COFF_WITH_PE
573 case R_SH_IMM32CE:
574 #endif
575 insn = bfd_get_32 (abfd, hit_data);
576 insn += sym_value + reloc_entry->addend;
577 bfd_put_32 (abfd, insn, hit_data);
578 break;
579 #ifdef COFF_WITH_PE
580 case R_SH_IMAGEBASE:
581 insn = bfd_get_32 (abfd, hit_data);
582 insn += (sym_value + reloc_entry->addend
583 - pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase);
584 bfd_put_32 (abfd, insn, hit_data);
585 break;
586 #endif
587 case R_SH_PCDISP:
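      /* A 12 bit PC relative branch displacement, counted in two byte
         units from four bytes past the instruction.  This is also the
         reloc that sh_relax_section leaves behind when it converts a
         jsr into a bsr.  */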
588 insn = bfd_get_16 (abfd, hit_data);
589 sym_value += reloc_entry->addend;
590 sym_value -= (input_section->output_section->vma
591 + input_section->output_offset
592 + addr
593 + 4);
594 sym_value += (insn & 0xfff) << 1;
595 if (insn & 0x800)
596 sym_value -= 0x1000;
597 insn = (insn & 0xf000) | (sym_value & 0xfff);
598 bfd_put_16 (abfd, insn, hit_data);
599 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
600 return bfd_reloc_overflow;
601 break;
602 default:
603 abort ();
604 break;
605 }
606
607 return bfd_reloc_ok;
608 }
609
610 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
611
612 /* We can do relaxing. */
613 #define coff_bfd_relax_section sh_relax_section
614
615 /* We use the special COFF backend linker. */
616 #define coff_relocate_section sh_relocate_section
617
618 /* When relaxing, we need to use special code to get the relocated
619 section contents. */
620 #define coff_bfd_get_relocated_section_contents \
621 sh_coff_get_relocated_section_contents
622
623 #include "coffcode.h"
624 \f
625 /* This function handles relaxing on the SH.
626
627 Function calls on the SH look like this:
628
629 movl L1,r0
630 ...
631 jsr @r0
632 ...
633 L1:
634 .long function
635
636 The compiler and assembler will cooperate to create R_SH_USES
637 relocs on the jsr instructions. The r_offset field of the
638 R_SH_USES reloc is the PC relative offset to the instruction which
639 loads the register (the r_offset field is computed as though it
640 were a jump instruction, so the offset value is actually from four
641 bytes past the instruction). The linker can use this reloc to
642 determine just which function is being called, and thus decide
643 whether it is possible to replace the jsr with a bsr.
644
645 If multiple function calls are all based on a single register load
646 (i.e., the same function is called multiple times), the compiler
647 guarantees that each function call will have an R_SH_USES reloc.
648 Therefore, if the linker is able to convert each R_SH_USES reloc
649 which refers to that address, it can safely eliminate the register
650 load.
651
652 When the assembler creates an R_SH_USES reloc, it examines it to
653 determine which address is being loaded (L1 in the above example).
654 It then counts the number of references to that address, and
655 creates an R_SH_COUNT reloc at that address. The r_offset field of
656 the R_SH_COUNT reloc will be the number of references. If the
657 linker is able to eliminate a register load, it can use the
658 R_SH_COUNT reloc to see whether it can also eliminate the function
659 address.
660
661 SH relaxing also handles another, unrelated, matter. On the SH, if
662 a load or store instruction is not aligned on a four byte boundary,
663 the memory cycle interferes with the 32 bit instruction fetch,
664 causing a one cycle bubble in the pipeline. Therefore, we try to
665 align load and store instructions on four byte boundaries if we
666 can, by swapping them with one of the adjacent instructions. */
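/* As an illustrative sketch (not taken from any particular testcase),
   the sequence above ends up looking like this once every R_SH_USES
   reference to L1 has been converted and the R_SH_COUNT value has
   dropped to zero:

        bsr     function
        ...

   The two byte mov.l is deleted, the jsr is rewritten as a bsr with a
   12 bit PC relative displacement (an R_SH_PCDISP reloc), and the four
   byte L1 literal is deleted once no remaining call needs it.  */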
667
668 static boolean
669 sh_relax_section (abfd, sec, link_info, again)
670 bfd *abfd;
671 asection *sec;
672 struct bfd_link_info *link_info;
673 boolean *again;
674 {
675 struct internal_reloc *internal_relocs;
676 struct internal_reloc *free_relocs = NULL;
677 boolean have_code;
678 struct internal_reloc *irel, *irelend;
679 bfd_byte *contents = NULL;
680 bfd_byte *free_contents = NULL;
681
682 *again = false;
683
684 if (link_info->relocateable
685 || (sec->flags & SEC_RELOC) == 0
686 || sec->reloc_count == 0)
687 return true;
688
689 /* If this is the first time we have been called for this section,
690 initialize the cooked size. */
691 if (sec->_cooked_size == 0)
692 sec->_cooked_size = sec->_raw_size;
693
694 internal_relocs = (_bfd_coff_read_internal_relocs
695 (abfd, sec, link_info->keep_memory,
696 (bfd_byte *) NULL, false,
697 (struct internal_reloc *) NULL));
698 if (internal_relocs == NULL)
699 goto error_return;
700 if (! link_info->keep_memory)
701 free_relocs = internal_relocs;
702
703 have_code = false;
704
705 irelend = internal_relocs + sec->reloc_count;
706 for (irel = internal_relocs; irel < irelend; irel++)
707 {
708 bfd_vma laddr, paddr, symval;
709 unsigned short insn;
710 struct internal_reloc *irelfn, *irelscan, *irelcount;
711 struct internal_syment sym;
712 bfd_signed_vma foff;
713
714 if (irel->r_type == R_SH_CODE)
715 have_code = true;
716
717 if (irel->r_type != R_SH_USES)
718 continue;
719
720 /* Get the section contents. */
721 if (contents == NULL)
722 {
723 if (coff_section_data (abfd, sec) != NULL
724 && coff_section_data (abfd, sec)->contents != NULL)
725 contents = coff_section_data (abfd, sec)->contents;
726 else
727 {
728 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
729 if (contents == NULL)
730 goto error_return;
731 free_contents = contents;
732
733 if (! bfd_get_section_contents (abfd, sec, contents,
734 (file_ptr) 0, sec->_raw_size))
735 goto error_return;
736 }
737 }
738
739 /* The r_offset field of the R_SH_USES reloc will point us to
740 the register load. The 4 is because the r_offset field is
741 computed as though it were a jump offset, which is measured
742 from 4 bytes after the jump instruction. */
743 laddr = irel->r_vaddr - sec->vma + 4;
744 /* Careful to sign extend the 32-bit offset. */
745 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
746 if (laddr >= sec->_raw_size)
747 {
748 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
749 bfd_get_filename (abfd),
750 (unsigned long) irel->r_vaddr);
751 continue;
752 }
753 insn = bfd_get_16 (abfd, contents + laddr);
754
755 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
756 if ((insn & 0xf000) != 0xd000)
757 {
758 ((*_bfd_error_handler)
759 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
760 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr, insn));
761 continue;
762 }
763
764 /* Get the address from which the register is being loaded. The
765 displacement in the mov.l instruction is quadrupled. It is a
766 displacement from four bytes after the movl instruction, but,
767 before adding in the PC address, two least significant bits
768 of the PC are cleared. We assume that the section is aligned
769 on a four byte boundary. */
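      /* Purely as a worked example: a mov.l whose displacement field is
         0x05 loads its literal from ((laddr + 4) & ~3) + 0x14, which is
         exactly the computation applied to paddr below.  */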
770 paddr = insn & 0xff;
771 paddr *= 4;
772 paddr += (laddr + 4) &~ 3;
773 if (paddr >= sec->_raw_size)
774 {
775 ((*_bfd_error_handler)
776 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
777 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
778 continue;
779 }
780
781 /* Get the reloc for the address from which the register is
782 being loaded. This reloc will tell us which function is
783 actually being called. */
784 paddr += sec->vma;
785 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
786 if (irelfn->r_vaddr == paddr
787 #ifdef COFF_WITH_PE
788 && (irelfn->r_type == R_SH_IMM32
789 || irelfn->r_type == R_SH_IMM32CE
790 || irelfn->r_type == R_SH_IMAGEBASE))
791
792 #else
793 && irelfn->r_type == R_SH_IMM32)
794 #endif
795 break;
796 if (irelfn >= irelend)
797 {
798 ((*_bfd_error_handler)
799 ("%s: 0x%lx: warning: could not find expected reloc",
800 bfd_get_filename (abfd), (unsigned long) paddr));
801 continue;
802 }
803
804 /* Get the value of the symbol referred to by the reloc. */
805 if (! _bfd_coff_get_external_symbols (abfd))
806 goto error_return;
807 bfd_coff_swap_sym_in (abfd,
808 ((bfd_byte *) obj_coff_external_syms (abfd)
809 + (irelfn->r_symndx
810 * bfd_coff_symesz (abfd))),
811 &sym);
812 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
813 {
814 ((*_bfd_error_handler)
815 ("%s: 0x%lx: warning: symbol in unexpected section",
816 bfd_get_filename (abfd), (unsigned long) paddr));
817 continue;
818 }
819
820 if (sym.n_sclass != C_EXT)
821 {
822 symval = (sym.n_value
823 - sec->vma
824 + sec->output_section->vma
825 + sec->output_offset);
826 }
827 else
828 {
829 struct coff_link_hash_entry *h;
830
831 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
832 BFD_ASSERT (h != NULL);
833 if (h->root.type != bfd_link_hash_defined
834 && h->root.type != bfd_link_hash_defweak)
835 {
836 /* This appears to be a reference to an undefined
837 symbol. Just ignore it--it will be caught by the
838 regular reloc processing. */
839 continue;
840 }
841
842 symval = (h->root.u.def.value
843 + h->root.u.def.section->output_section->vma
844 + h->root.u.def.section->output_offset);
845 }
846
847 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
848
849 /* See if this function call can be shortened. */
850 foff = (symval
851 - (irel->r_vaddr
852 - sec->vma
853 + sec->output_section->vma
854 + sec->output_offset
855 + 4));
856 if (foff < -0x1000 || foff >= 0x1000)
857 {
858 /* After all that work, we can't shorten this function call. */
859 continue;
860 }
861
862 /* Shorten the function call. */
863
864 /* For simplicity of coding, we are going to modify the section
865 contents, the section relocs, and the BFD symbol table. We
866 must tell the rest of the code not to free up this
867 information. It would be possible to instead create a table
868 of changes which have to be made, as is done in coff-mips.c;
869 that would be more work, but would require less memory when
870 the linker is run. */
871
872 if (coff_section_data (abfd, sec) == NULL)
873 {
874 sec->used_by_bfd =
875 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
876 if (sec->used_by_bfd == NULL)
877 goto error_return;
878 }
879
880 coff_section_data (abfd, sec)->relocs = internal_relocs;
881 coff_section_data (abfd, sec)->keep_relocs = true;
882 free_relocs = NULL;
883
884 coff_section_data (abfd, sec)->contents = contents;
885 coff_section_data (abfd, sec)->keep_contents = true;
886 free_contents = NULL;
887
888 obj_coff_keep_syms (abfd) = true;
889
890 /* Replace the jsr with a bsr. */
891
892 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
893 replace the jsr with a bsr. */
894 irel->r_type = R_SH_PCDISP;
895 irel->r_symndx = irelfn->r_symndx;
896 if (sym.n_sclass != C_EXT)
897 {
898 /* If this needs to be changed because of future relaxing,
899 it will be handled here like other internal PCDISP
900 relocs. */
901 bfd_put_16 (abfd,
902 0xb000 | ((foff >> 1) & 0xfff),
903 contents + irel->r_vaddr - sec->vma);
904 }
905 else
906 {
907 /* We can't fully resolve this yet, because the external
908 symbol value may be changed by future relaxing. We let
909 the final link phase handle it. */
910 bfd_put_16 (abfd, 0xb000, contents + irel->r_vaddr - sec->vma);
911 }
912
913 /* See if there is another R_SH_USES reloc referring to the same
914 register load. */
915 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
916 if (irelscan->r_type == R_SH_USES
917 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
918 break;
919 if (irelscan < irelend)
920 {
921 /* Some other function call depends upon this register load,
922 and we have not yet converted that function call.
923 Indeed, we may never be able to convert it. There is
924 nothing else we can do at this point. */
925 continue;
926 }
927
928 /* Look for a R_SH_COUNT reloc on the location where the
929 function address is stored. Do this before deleting any
930 bytes, to avoid confusion about the address. */
931 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
932 if (irelcount->r_vaddr == paddr
933 && irelcount->r_type == R_SH_COUNT)
934 break;
935
936 /* Delete the register load. */
937 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
938 goto error_return;
939
940 /* That will change things, so, just in case it permits some
941 other function call to come within range, we should relax
942 again. Note that this is not required, and it may be slow. */
943 *again = true;
944
945 /* Now check whether we got a COUNT reloc. */
946 if (irelcount >= irelend)
947 {
948 ((*_bfd_error_handler)
949 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
950 bfd_get_filename (abfd), (unsigned long) paddr));
951 continue;
952 }
953
954 /* The number of uses is stored in the r_offset field. We've
955 just deleted one. */
956 if (irelcount->r_offset == 0)
957 {
958 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
959 bfd_get_filename (abfd),
960 (unsigned long) paddr));
961 continue;
962 }
963
964 --irelcount->r_offset;
965
966 /* If there are no more uses, we can delete the address. Reload
967 the address from irelfn, in case it was changed by the
968 previous call to sh_relax_delete_bytes. */
969 if (irelcount->r_offset == 0)
970 {
971 if (! sh_relax_delete_bytes (abfd, sec,
972 irelfn->r_vaddr - sec->vma, 4))
973 goto error_return;
974 }
975
976 /* We've done all we can with that function call. */
977 }
978
979 /* Look for load and store instructions that we can align on four
980 byte boundaries. */
981 if (have_code)
982 {
983 boolean swapped;
984
985 /* Get the section contents. */
986 if (contents == NULL)
987 {
988 if (coff_section_data (abfd, sec) != NULL
989 && coff_section_data (abfd, sec)->contents != NULL)
990 contents = coff_section_data (abfd, sec)->contents;
991 else
992 {
993 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
994 if (contents == NULL)
995 goto error_return;
996 free_contents = contents;
997
998 if (! bfd_get_section_contents (abfd, sec, contents,
999 (file_ptr) 0, sec->_raw_size))
1000 goto error_return;
1001 }
1002 }
1003
1004 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1005 goto error_return;
1006
1007 if (swapped)
1008 {
1009 if (coff_section_data (abfd, sec) == NULL)
1010 {
1011 sec->used_by_bfd =
1012 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
1013 if (sec->used_by_bfd == NULL)
1014 goto error_return;
1015 }
1016
1017 coff_section_data (abfd, sec)->relocs = internal_relocs;
1018 coff_section_data (abfd, sec)->keep_relocs = true;
1019 free_relocs = NULL;
1020
1021 coff_section_data (abfd, sec)->contents = contents;
1022 coff_section_data (abfd, sec)->keep_contents = true;
1023 free_contents = NULL;
1024
1025 obj_coff_keep_syms (abfd) = true;
1026 }
1027 }
1028
1029 if (free_relocs != NULL)
1030 {
1031 free (free_relocs);
1032 free_relocs = NULL;
1033 }
1034
1035 if (free_contents != NULL)
1036 {
1037 if (! link_info->keep_memory)
1038 free (free_contents);
1039 else
1040 {
1041 /* Cache the section contents for coff_link_input_bfd. */
1042 if (coff_section_data (abfd, sec) == NULL)
1043 {
1044 sec->used_by_bfd =
1045 ((PTR) bfd_zalloc (abfd, sizeof (struct coff_section_tdata)));
1046 if (sec->used_by_bfd == NULL)
1047 goto error_return;
1048 coff_section_data (abfd, sec)->relocs = NULL;
1049 }
1050 coff_section_data (abfd, sec)->contents = contents;
1051 }
1052 }
1053
1054 return true;
1055
1056 error_return:
1057 if (free_relocs != NULL)
1058 free (free_relocs);
1059 if (free_contents != NULL)
1060 free (free_contents);
1061 return false;
1062 }
1063
1064 /* Delete some bytes from a section while relaxing. */
1065
1066 static boolean
1067 sh_relax_delete_bytes (abfd, sec, addr, count)
1068 bfd *abfd;
1069 asection *sec;
1070 bfd_vma addr;
1071 int count;
1072 {
1073 bfd_byte *contents;
1074 struct internal_reloc *irel, *irelend;
1075 struct internal_reloc *irelalign;
1076 bfd_vma toaddr;
1077 bfd_byte *esym, *esymend;
1078 bfd_size_type symesz;
1079 struct coff_link_hash_entry **sym_hash;
1080 asection *o;
1081
1082 contents = coff_section_data (abfd, sec)->contents;
1083
1084 /* The deletion must stop at the next ALIGN reloc for an alignment
1085 power larger than the number of bytes we are deleting. */
1086
1087 irelalign = NULL;
1088 toaddr = sec->_cooked_size;
1089
1090 irel = coff_section_data (abfd, sec)->relocs;
1091 irelend = irel + sec->reloc_count;
1092 for (; irel < irelend; irel++)
1093 {
1094 if (irel->r_type == R_SH_ALIGN
1095 && irel->r_vaddr - sec->vma > addr
1096 && count < (1 << irel->r_offset))
1097 {
1098 irelalign = irel;
1099 toaddr = irel->r_vaddr - sec->vma;
1100 break;
1101 }
1102 }
1103
1104 /* Actually delete the bytes. */
1105 memmove (contents + addr, contents + addr + count, toaddr - addr - count);
1106 if (irelalign == NULL)
1107 sec->_cooked_size -= count;
1108 else
1109 {
1110 int i;
1111
1112 #define NOP_OPCODE (0x0009)
1113
1114 BFD_ASSERT ((count & 1) == 0);
1115 for (i = 0; i < count; i += 2)
1116 bfd_put_16 (abfd, NOP_OPCODE, contents + toaddr - count + i);
1117 }
1118
1119 /* Adjust all the relocs. */
1120 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1121 {
1122 bfd_vma nraddr, stop;
1123 bfd_vma start = 0;
1124 int insn = 0;
1125 struct internal_syment sym;
1126 int off, adjust, oinsn;
1127 bfd_signed_vma voff = 0;
1128 boolean overflow;
1129
1130 /* Get the new reloc address. */
1131 nraddr = irel->r_vaddr - sec->vma;
1132 if ((irel->r_vaddr - sec->vma > addr
1133 && irel->r_vaddr - sec->vma < toaddr)
1134 || (irel->r_type == R_SH_ALIGN
1135 && irel->r_vaddr - sec->vma == toaddr))
1136 nraddr -= count;
1137
1138 /* See if this reloc was for the bytes we have deleted, in which
1139 case we no longer care about it. Don't delete relocs which
1140 represent addresses, though. */
1141 if (irel->r_vaddr - sec->vma >= addr
1142 && irel->r_vaddr - sec->vma < addr + count
1143 && irel->r_type != R_SH_ALIGN
1144 && irel->r_type != R_SH_CODE
1145 && irel->r_type != R_SH_DATA
1146 && irel->r_type != R_SH_LABEL)
1147 irel->r_type = R_SH_UNUSED;
1148
1149 /* If this is a PC relative reloc, see if the range it covers
1150 includes the bytes we have deleted. */
1151 switch (irel->r_type)
1152 {
1153 default:
1154 break;
1155
1156 case R_SH_PCDISP8BY2:
1157 case R_SH_PCDISP:
1158 case R_SH_PCRELIMM8BY2:
1159 case R_SH_PCRELIMM8BY4:
1160 start = irel->r_vaddr - sec->vma;
1161 insn = bfd_get_16 (abfd, contents + nraddr);
1162 break;
1163 }
1164
1165 switch (irel->r_type)
1166 {
1167 default:
1168 start = stop = addr;
1169 break;
1170
1171 case R_SH_IMM32:
1172 #ifdef COFF_WITH_PE
1173 case R_SH_IMM32CE:
1174 case R_SH_IMAGEBASE:
1175 #endif
1176 /* If this reloc is against a symbol defined in this
1177 section, and the symbol will not be adjusted below, we
1178 must check the addend to see if it will put the value in
1179 range to be adjusted, and hence must be changed. */
1180 bfd_coff_swap_sym_in (abfd,
1181 ((bfd_byte *) obj_coff_external_syms (abfd)
1182 + (irel->r_symndx
1183 * bfd_coff_symesz (abfd))),
1184 &sym);
1185 if (sym.n_sclass != C_EXT
1186 && sym.n_scnum == sec->target_index
1187 && ((bfd_vma) sym.n_value <= addr
1188 || (bfd_vma) sym.n_value >= toaddr))
1189 {
1190 bfd_vma val;
1191
1192 val = bfd_get_32 (abfd, contents + nraddr);
1193 val += sym.n_value;
1194 if (val > addr && val < toaddr)
1195 bfd_put_32 (abfd, val - count, contents + nraddr);
1196 }
1197 start = stop = addr;
1198 break;
1199
1200 case R_SH_PCDISP8BY2:
1201 off = insn & 0xff;
1202 if (off & 0x80)
1203 off -= 0x100;
1204 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1205 break;
1206
1207 case R_SH_PCDISP:
1208 bfd_coff_swap_sym_in (abfd,
1209 ((bfd_byte *) obj_coff_external_syms (abfd)
1210 + (irel->r_symndx
1211 * bfd_coff_symesz (abfd))),
1212 &sym);
1213 if (sym.n_sclass == C_EXT)
1214 start = stop = addr;
1215 else
1216 {
1217 off = insn & 0xfff;
1218 if (off & 0x800)
1219 off -= 0x1000;
1220 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1221 }
1222 break;
1223
1224 case R_SH_PCRELIMM8BY2:
1225 off = insn & 0xff;
1226 stop = start + 4 + off * 2;
1227 break;
1228
1229 case R_SH_PCRELIMM8BY4:
1230 off = insn & 0xff;
1231 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1232 break;
1233
1234 case R_SH_SWITCH8:
1235 case R_SH_SWITCH16:
1236 case R_SH_SWITCH32:
1237 /* These reloc types represent
1238 .word L2-L1
1239 The r_offset field holds the difference between the reloc
1240 address and L1. That is the start of the reloc, and
1241 adding in the contents gives us the top. We must adjust
1242 both the r_offset field and the section contents. */
1243
1244 start = irel->r_vaddr - sec->vma;
1245 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1246
1247 if (start > addr
1248 && start < toaddr
1249 && (stop <= addr || stop >= toaddr))
1250 irel->r_offset += count;
1251 else if (stop > addr
1252 && stop < toaddr
1253 && (start <= addr || start >= toaddr))
1254 irel->r_offset -= count;
1255
1256 start = stop;
1257
1258 if (irel->r_type == R_SH_SWITCH16)
1259 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1260 else if (irel->r_type == R_SH_SWITCH8)
1261 voff = bfd_get_8 (abfd, contents + nraddr);
1262 else
1263 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1264 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1265
1266 break;
1267
1268 case R_SH_USES:
1269 start = irel->r_vaddr - sec->vma;
1270 stop = (bfd_vma) ((bfd_signed_vma) start
1271 + (long) irel->r_offset
1272 + 4);
1273 break;
1274 }
1275
1276 if (start > addr
1277 && start < toaddr
1278 && (stop <= addr || stop >= toaddr))
1279 adjust = count;
1280 else if (stop > addr
1281 && stop < toaddr
1282 && (start <= addr || start >= toaddr))
1283 adjust = - count;
1284 else
1285 adjust = 0;
1286
1287 if (adjust != 0)
1288 {
1289 oinsn = insn;
1290 overflow = false;
1291 switch (irel->r_type)
1292 {
1293 default:
1294 abort ();
1295 break;
1296
1297 case R_SH_PCDISP8BY2:
1298 case R_SH_PCRELIMM8BY2:
1299 insn += adjust / 2;
1300 if ((oinsn & 0xff00) != (insn & 0xff00))
1301 overflow = true;
1302 bfd_put_16 (abfd, insn, contents + nraddr);
1303 break;
1304
1305 case R_SH_PCDISP:
1306 insn += adjust / 2;
1307 if ((oinsn & 0xf000) != (insn & 0xf000))
1308 overflow = true;
1309 bfd_put_16 (abfd, insn, contents + nraddr);
1310 break;
1311
1312 case R_SH_PCRELIMM8BY4:
1313 BFD_ASSERT (adjust == count || count >= 4);
1314 if (count >= 4)
1315 insn += adjust / 4;
1316 else
1317 {
1318 if ((irel->r_vaddr & 3) == 0)
1319 ++insn;
1320 }
1321 if ((oinsn & 0xff00) != (insn & 0xff00))
1322 overflow = true;
1323 bfd_put_16 (abfd, insn, contents + nraddr);
1324 break;
1325
1326 case R_SH_SWITCH8:
1327 voff += adjust;
1328 if (voff < 0 || voff >= 0xff)
1329 overflow = true;
1330 bfd_put_8 (abfd, voff, contents + nraddr);
1331 break;
1332
1333 case R_SH_SWITCH16:
1334 voff += adjust;
1335 if (voff < - 0x8000 || voff >= 0x8000)
1336 overflow = true;
1337 bfd_put_signed_16 (abfd, voff, contents + nraddr);
1338 break;
1339
1340 case R_SH_SWITCH32:
1341 voff += adjust;
1342 bfd_put_signed_32 (abfd, voff, contents + nraddr);
1343 break;
1344
1345 case R_SH_USES:
1346 irel->r_offset += adjust;
1347 break;
1348 }
1349
1350 if (overflow)
1351 {
1352 ((*_bfd_error_handler)
1353 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1354 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
1355 bfd_set_error (bfd_error_bad_value);
1356 return false;
1357 }
1358 }
1359
1360 irel->r_vaddr = nraddr + sec->vma;
1361 }
1362
1363 /* Look through all the other sections. If they contain any IMM32
1364 relocs against internal symbols which we are not going to adjust
1365 below, we may need to adjust the addends. */
1366 for (o = abfd->sections; o != NULL; o = o->next)
1367 {
1368 struct internal_reloc *internal_relocs;
1369 struct internal_reloc *irelscan, *irelscanend;
1370 bfd_byte *ocontents;
1371
1372 if (o == sec
1373 || (o->flags & SEC_RELOC) == 0
1374 || o->reloc_count == 0)
1375 continue;
1376
1377 /* We always cache the relocs. Perhaps, if info->keep_memory is
1378 false, we should free them, if we are permitted to, when we
1379 leave sh_relax_section. */
1380 internal_relocs = (_bfd_coff_read_internal_relocs
1381 (abfd, o, true, (bfd_byte *) NULL, false,
1382 (struct internal_reloc *) NULL));
1383 if (internal_relocs == NULL)
1384 return false;
1385
1386 ocontents = NULL;
1387 irelscanend = internal_relocs + o->reloc_count;
1388 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1389 {
1390 struct internal_syment sym;
1391
1392 #ifdef COFF_WITH_PE
1393 if (irelscan->r_type != R_SH_IMM32
1394 && irelscan->r_type != R_SH_IMAGEBASE
1395 && irelscan->r_type != R_SH_IMM32CE)
1396 #else
1397 if (irelscan->r_type != R_SH_IMM32)
1398 #endif
1399 continue;
1400
1401 bfd_coff_swap_sym_in (abfd,
1402 ((bfd_byte *) obj_coff_external_syms (abfd)
1403 + (irelscan->r_symndx
1404 * bfd_coff_symesz (abfd))),
1405 &sym);
1406 if (sym.n_sclass != C_EXT
1407 && sym.n_scnum == sec->target_index
1408 && ((bfd_vma) sym.n_value <= addr
1409 || (bfd_vma) sym.n_value >= toaddr))
1410 {
1411 bfd_vma val;
1412
1413 if (ocontents == NULL)
1414 {
1415 if (coff_section_data (abfd, o)->contents != NULL)
1416 ocontents = coff_section_data (abfd, o)->contents;
1417 else
1418 {
1419 /* We always cache the section contents.
1420 Perhaps, if info->keep_memory is false, we
1421 should free them, if we are permitted to,
1422 when we leave sh_relax_section. */
1423 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1424 if (ocontents == NULL)
1425 return false;
1426 if (! bfd_get_section_contents (abfd, o, ocontents,
1427 (file_ptr) 0,
1428 o->_raw_size))
1429 return false;
1430 coff_section_data (abfd, o)->contents = ocontents;
1431 }
1432 }
1433
1434 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1435 val += sym.n_value;
1436 if (val > addr && val < toaddr)
1437 bfd_put_32 (abfd, val - count,
1438 ocontents + irelscan->r_vaddr - o->vma);
1439
1440 coff_section_data (abfd, o)->keep_contents = true;
1441 }
1442 }
1443 }
1444
1445 /* Adjusting the internal symbols will not work if something has
1446 already retrieved the generic symbols. It would be possible to
1447 make this work by adjusting the generic symbols at the same time.
1448 However, this case should not arise in normal usage. */
1449 if (obj_symbols (abfd) != NULL
1450 || obj_raw_syments (abfd) != NULL)
1451 {
1452 ((*_bfd_error_handler)
1453 ("%s: fatal: generic symbols retrieved before relaxing",
1454 bfd_get_filename (abfd)));
1455 bfd_set_error (bfd_error_invalid_operation);
1456 return false;
1457 }
1458
1459 /* Adjust all the symbols. */
1460 sym_hash = obj_coff_sym_hashes (abfd);
1461 symesz = bfd_coff_symesz (abfd);
1462 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1463 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1464 while (esym < esymend)
1465 {
1466 struct internal_syment isym;
1467
1468 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1469
1470 if (isym.n_scnum == sec->target_index
1471 && (bfd_vma) isym.n_value > addr
1472 && (bfd_vma) isym.n_value < toaddr)
1473 {
1474 isym.n_value -= count;
1475
1476 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1477
1478 if (*sym_hash != NULL)
1479 {
1480 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1481 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1482 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1483 && (*sym_hash)->root.u.def.value < toaddr);
1484 (*sym_hash)->root.u.def.value -= count;
1485 }
1486 }
1487
1488 esym += (isym.n_numaux + 1) * symesz;
1489 sym_hash += isym.n_numaux + 1;
1490 }
1491
1492 /* See if we can move the ALIGN reloc forward. We have adjusted
1493 r_vaddr for it already. */
1494 if (irelalign != NULL)
1495 {
1496 bfd_vma alignto, alignaddr;
1497
1498 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1499 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1500 1 << irelalign->r_offset);
1501 if (alignto != alignaddr)
1502 {
1503 /* Tail recursion. */
1504 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1505 alignto - alignaddr);
1506 }
1507 }
1508
1509 return true;
1510 }
1511 \f
1512 /* This is yet another version of the SH opcode table, used to rapidly
1513 get information about a particular instruction. */
1514
1515 /* The opcode map is represented by an array of these structures. The
1516 array is indexed by the high order four bits in the instruction. */
1517
1518 struct sh_major_opcode
1519 {
1520 /* A pointer to the instruction list. This is an array which
1521 contains all the instructions with this major opcode. */
1522 const struct sh_minor_opcode *minor_opcodes;
1523 /* The number of elements in minor_opcodes. */
1524 unsigned short count;
1525 };
1526
1527 /* This structure holds information for a set of SH opcodes. The
1528 instruction code is anded with the mask value, and the resulting
1529 value is used to search the sorted opcode list. */
1530
1531 struct sh_minor_opcode
1532 {
1533 /* The sorted opcode list. */
1534 const struct sh_opcode *opcodes;
1535 /* The number of elements in opcodes. */
1536 unsigned short count;
1537 /* The mask value to use when searching the opcode list. */
1538 unsigned short mask;
1539 };
1540
1541 /* This structure holds information for an SH instruction. An array
1542 of these structures is sorted in order by opcode. */
1543
1544 struct sh_opcode
1545 {
1546 /* The code for this instruction, after it has been anded with the
1547 mask value in the sh_major_opcode structure. */
1548 unsigned short opcode;
1549 /* Flags for this instruction. */
1550 unsigned short flags;
1551 };
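/* A worked example of the lookup (illustrative only): for the
   instruction 0x2008 (tst rm,rn), the high order four bits index entry
   2 of the major opcode array; its single minor entry carries the mask
   0xf00f, and 0x2008 & 0xf00f is 0x2008, which is found in sh_opcode20
   below with the flags SETSSP | USES1 | USES2.  */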
1552
1553 /* Flags which appear in the sh_opcode structure. */
1554
1555 /* This instruction loads a value from memory. */
1556 #define LOAD (0x1)
1557
1558 /* This instruction stores a value to memory. */
1559 #define STORE (0x2)
1560
1561 /* This instruction is a branch. */
1562 #define BRANCH (0x4)
1563
1564 /* This instruction has a delay slot. */
1565 #define DELAY (0x8)
1566
1567 /* This instruction uses the value in the register in the field at
1568 mask 0x0f00 of the instruction. */
1569 #define USES1 (0x10)
1570 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1571
1572 /* This instruction uses the value in the register in the field at
1573 mask 0x00f0 of the instruction. */
1574 #define USES2 (0x20)
1575 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1576
1577 /* This instruction uses the value in register 0. */
1578 #define USESR0 (0x40)
1579
1580 /* This instruction sets the value in the register in the field at
1581 mask 0x0f00 of the instruction. */
1582 #define SETS1 (0x80)
1583 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1584
1585 /* This instruction sets the value in the register in the field at
1586 mask 0x00f0 of the instruction. */
1587 #define SETS2 (0x100)
1588 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1589
1590 /* This instruction sets register 0. */
1591 #define SETSR0 (0x200)
1592
1593 /* This instruction sets a special register. */
1594 #define SETSSP (0x400)
1595
1596 /* This instruction uses a special register. */
1597 #define USESSP (0x800)
1598
1599 /* This instruction uses the floating point register in the field at
1600 mask 0x0f00 of the instruction. */
1601 #define USESF1 (0x1000)
1602 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1603
1604 /* This instruction uses the floating point register in the field at
1605 mask 0x00f0 of the instruction. */
1606 #define USESF2 (0x2000)
1607 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1608
1609 /* This instruction uses floating point register 0. */
1610 #define USESF0 (0x4000)
1611
1612 /* This instruction sets the floating point register in the field at
1613 mask 0x0f00 of the instruction. */
1614 #define SETSF1 (0x8000)
1615 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1616
1617 #define USESAS (0x10000)
1618 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1619 #define USESR8 (0x20000)
1620 #define SETSAS (0x40000)
1621 #define SETSAS_REG(x) USESAS_REG (x)
1622
1623 static boolean sh_insn_uses_reg
1624 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1625 static boolean sh_insn_sets_reg
1626 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1627 static boolean sh_insn_uses_or_sets_reg
1628 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1629 static boolean sh_insn_uses_freg
1630 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1631 static boolean sh_insn_sets_freg
1632 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1633 static boolean sh_insn_uses_or_sets_freg
1634 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1635 static boolean sh_insns_conflict
1636 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1637 const struct sh_opcode *));
1638 static boolean sh_load_use
1639 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1640 const struct sh_opcode *));
1641
1642 /* The opcode maps. */
1643
1644 #define MAP(a) a, sizeof a / sizeof a[0]
1645
1646 static const struct sh_opcode sh_opcode00[] =
1647 {
1648 { 0x0008, SETSSP }, /* clrt */
1649 { 0x0009, 0 }, /* nop */
1650 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1651 { 0x0018, SETSSP }, /* sett */
1652 { 0x0019, SETSSP }, /* div0u */
1653 { 0x001b, 0 }, /* sleep */
1654 { 0x0028, SETSSP }, /* clrmac */
1655 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1656 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1657 { 0x0048, SETSSP }, /* clrs */
1658 { 0x0058, SETSSP } /* sets */
1659 };
1660
1661 static const struct sh_opcode sh_opcode01[] =
1662 {
1663 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1664 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1665 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1666 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1667 { 0x0029, SETS1 | USESSP }, /* movt rn */
1668 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1669 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1670 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1671 { 0x0083, LOAD | USES1 }, /* pref @rn */
1672 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1673 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1674 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1675 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1676 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1677 };
1678
1679 /* These sixteen instructions can be handled with one table entry below. */
1680 #if 0
1681 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1682 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1683 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1684 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1685 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1686 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1687 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1688 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1689 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1690 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1691 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1692 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1693 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1694 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1695 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1696 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1697 #endif
1698
1699 static const struct sh_opcode sh_opcode02[] =
1700 {
1701 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1702 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1703 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1704 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1705 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1706 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1707 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1708 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1709 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1710 };
1711
1712 static const struct sh_minor_opcode sh_opcode0[] =
1713 {
1714 { MAP (sh_opcode00), 0xffff },
1715 { MAP (sh_opcode01), 0xf0ff },
1716 { MAP (sh_opcode02), 0xf00f }
1717 };
1718
1719 static const struct sh_opcode sh_opcode10[] =
1720 {
1721 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1722 };
1723
1724 static const struct sh_minor_opcode sh_opcode1[] =
1725 {
1726 { MAP (sh_opcode10), 0xf000 }
1727 };
1728
1729 static const struct sh_opcode sh_opcode20[] =
1730 {
1731 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1732 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1733 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1734 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1735 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1736 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1737 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1738 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1739 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1740 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1741 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1742 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1743 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1744 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1745 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1746 };
1747
1748 static const struct sh_minor_opcode sh_opcode2[] =
1749 {
1750 { MAP (sh_opcode20), 0xf00f }
1751 };
1752
1753 static const struct sh_opcode sh_opcode30[] =
1754 {
1755 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1756 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1757 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1758 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1759 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1760 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1761 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1762 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1763 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1764 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1765 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1766 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1767 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1768 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1769 };
1770
1771 static const struct sh_minor_opcode sh_opcode3[] =
1772 {
1773 { MAP (sh_opcode30), 0xf00f }
1774 };
1775
1776 static const struct sh_opcode sh_opcode40[] =
1777 {
1778 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1779 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1780 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1781 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1782 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1783 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1784 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1785 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1786 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1787 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1788 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1789 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1790 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1791 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1792 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1793 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1794 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1795 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1796 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1797 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1798 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1799 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1800 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1801 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1802 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1803 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1804 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1805 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1806 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1807 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1808 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1809 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1810 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1811 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1812 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1813 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1814 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1815 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1816 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1817 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1818 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1819 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1820 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1821 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1822 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1823 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1824 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1825 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1826 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1827 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1828 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1829 #if 0 /* These groups of sixteen insns can be
1830    handled with one table entry each below. */
1831 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1832 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1833 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1834 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1835 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1836 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1837 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1838 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1839 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1840 ..
1841 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1842
1843 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1844 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1845 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1846 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1847 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1848 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1849 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1850 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1851 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1852 ..
1853 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1854
1855 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1856 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1857 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1858 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1859 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1860 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1861 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1862 { 0x407e, SETSSP | USES1 } /* ldc rm,re */
1863 { 0x408e, SETSSP | USES1 } /* ldc rm,r0_bank */
1864 ..
1865 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1866 #endif
1867 };
1868
1869 static const struct sh_opcode sh_opcode41[] =
1870 {
1871 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1872 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1873 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1874 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1875 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1876 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1877 };
1878
1879 static const struct sh_minor_opcode sh_opcode4[] =
1880 {
1881 { MAP (sh_opcode40), 0xf0ff },
1882 { MAP (sh_opcode41), 0xf00f }
1883 };
1884
1885 static const struct sh_opcode sh_opcode50[] =
1886 {
1887 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1888 };
1889
1890 static const struct sh_minor_opcode sh_opcode5[] =
1891 {
1892 { MAP (sh_opcode50), 0xf000 }
1893 };
1894
1895 static const struct sh_opcode sh_opcode60[] =
1896 {
1897 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1898 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1899 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1900 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1901 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1902 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1903 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1904 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1905 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1906 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1907 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1908 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1909 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1910 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1911 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1912 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1913 };
1914
1915 static const struct sh_minor_opcode sh_opcode6[] =
1916 {
1917 { MAP (sh_opcode60), 0xf00f }
1918 };
1919
1920 static const struct sh_opcode sh_opcode70[] =
1921 {
1922 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1923 };
1924
1925 static const struct sh_minor_opcode sh_opcode7[] =
1926 {
1927 { MAP (sh_opcode70), 0xf000 }
1928 };
1929
1930 static const struct sh_opcode sh_opcode80[] =
1931 {
1932 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1933 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1934 { 0x8200, SETSSP }, /* setrc #imm */
1935 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1936 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1937 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1938 { 0x8900, BRANCH | USESSP }, /* bt label */
1939 { 0x8b00, BRANCH | USESSP }, /* bf label */
1940 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1941 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1942 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1943 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1944 };
1945
1946 static const struct sh_minor_opcode sh_opcode8[] =
1947 {
1948 { MAP (sh_opcode80), 0xff00 }
1949 };
1950
1951 static const struct sh_opcode sh_opcode90[] =
1952 {
1953 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1954 };
1955
1956 static const struct sh_minor_opcode sh_opcode9[] =
1957 {
1958 { MAP (sh_opcode90), 0xf000 }
1959 };
1960
1961 static const struct sh_opcode sh_opcodea0[] =
1962 {
1963 { 0xa000, BRANCH | DELAY } /* bra label */
1964 };
1965
1966 static const struct sh_minor_opcode sh_opcodea[] =
1967 {
1968 { MAP (sh_opcodea0), 0xf000 }
1969 };
1970
1971 static const struct sh_opcode sh_opcodeb0[] =
1972 {
1973 { 0xb000, BRANCH | DELAY } /* bsr label */
1974 };
1975
1976 static const struct sh_minor_opcode sh_opcodeb[] =
1977 {
1978 { MAP (sh_opcodeb0), 0xf000 }
1979 };
1980
1981 static const struct sh_opcode sh_opcodec0[] =
1982 {
1983 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1984 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1985 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1986 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1987 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1988 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1989 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1990 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1991 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1992 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1993 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1994 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1995 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1996 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1997 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1998 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1999 };
2000
2001 static const struct sh_minor_opcode sh_opcodec[] =
2002 {
2003 { MAP (sh_opcodec0), 0xff00 }
2004 };
2005
2006 static const struct sh_opcode sh_opcoded0[] =
2007 {
2008 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
2009 };
2010
2011 static const struct sh_minor_opcode sh_opcoded[] =
2012 {
2013 { MAP (sh_opcoded0), 0xf000 }
2014 };
2015
2016 static const struct sh_opcode sh_opcodee0[] =
2017 {
2018 { 0xe000, SETS1 } /* mov #imm,rn */
2019 };
2020
2021 static const struct sh_minor_opcode sh_opcodee[] =
2022 {
2023 { MAP (sh_opcodee0), 0xf000 }
2024 };
2025
2026 static const struct sh_opcode sh_opcodef0[] =
2027 {
2028 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
2029 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
2030 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
2031 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
2032 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
2033 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
2034 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
2035 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
2036 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
2037 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
2038 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
2039 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
2040 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
2041 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
2042 };
2043
2044 static const struct sh_opcode sh_opcodef1[] =
2045 {
2046 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
2047 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
2048 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
2049 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
2050 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
2051 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
2052 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2053 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2054 { 0xf08d, SETSF1 }, /* fldi0 fn */
2055 { 0xf09d, SETSF1 } /* fldi1 fn */
2056 };
2057
2058 static const struct sh_minor_opcode sh_opcodef[] =
2059 {
2060 { MAP (sh_opcodef0), 0xf00f },
2061 { MAP (sh_opcodef1), 0xf0ff }
2062 };
2063
2064 static struct sh_major_opcode sh_opcodes[] =
2065 {
2066 { MAP (sh_opcode0) },
2067 { MAP (sh_opcode1) },
2068 { MAP (sh_opcode2) },
2069 { MAP (sh_opcode3) },
2070 { MAP (sh_opcode4) },
2071 { MAP (sh_opcode5) },
2072 { MAP (sh_opcode6) },
2073 { MAP (sh_opcode7) },
2074 { MAP (sh_opcode8) },
2075 { MAP (sh_opcode9) },
2076 { MAP (sh_opcodea) },
2077 { MAP (sh_opcodeb) },
2078 { MAP (sh_opcodec) },
2079 { MAP (sh_opcoded) },
2080 { MAP (sh_opcodee) },
2081 { MAP (sh_opcodef) }
2082 };
2083
2084 /* The double data transfer / parallel processing insns are not
2085 described here. This will cause sh_align_load_span to leave them alone. */
2086
2087 static const struct sh_opcode sh_dsp_opcodef0[] =
2088 {
2089 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2090 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2091 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2092 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2093 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2094 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2095 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2096 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2097 };
2098
2099 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2100 {
2101 { MAP (sh_dsp_opcodef0), 0xfc0d }
2102 };
2103
2104 /* Given an instruction, return a pointer to the corresponding
2105 sh_opcode structure. Return NULL if the instruction is not
2106 recognized. */
2107
2108 static const struct sh_opcode *
2109 sh_insn_info (insn)
2110 unsigned int insn;
2111 {
2112 const struct sh_major_opcode *maj;
2113 const struct sh_minor_opcode *min, *minend;
2114
2115 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2116 min = maj->minor_opcodes;
2117 minend = min + maj->count;
2118 for (; min < minend; min++)
2119 {
2120 unsigned int l;
2121 const struct sh_opcode *op, *opend;
2122
2123 l = insn & min->mask;
2124 op = min->opcodes;
2125 opend = op + min->count;
2126
2127 /* Since the opcode tables are sorted, we could use a binary
2128 search here if the count were above some cutoff value. */
2129 for (; op < opend; op++)
2130 if (op->opcode == l)
2131 return op;
2132 }
2133
2134 return NULL;
2135 }
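
/* A minimal illustration (not part of the BFD interface) of how the
   two-level opcode tables above are intended to be used: look up a raw
   16-bit instruction with sh_insn_info and then test the flags of the
   returned entry.  The instruction value used here, 0x6212
   (mov.l @r1,r2), is purely an example.  */
#if 0
static boolean
example_insn_is_load_or_store ()
{
  unsigned int insn = 0x6212;	/* mov.l @r1,r2 */
  const struct sh_opcode *op = sh_insn_info (insn);

  /* Unrecognized instructions are treated conservatively by callers.  */
  if (op == NULL)
    return false;
  return (op->flags & (LOAD | STORE)) != 0;
}
#endif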
2136
2137 /* See whether an instruction uses or sets a general purpose register.  */
2138
2139 static boolean
2140 sh_insn_uses_or_sets_reg (insn, op, reg)
2141 unsigned int insn;
2142 const struct sh_opcode *op;
2143 unsigned int reg;
2144 {
2145 if (sh_insn_uses_reg (insn, op, reg))
2146 return true;
2147
2148 return sh_insn_sets_reg (insn, op, reg);
2149 }
2150
2151 /* See whether an instruction uses a general purpose register. */
2152
2153 static boolean
2154 sh_insn_uses_reg (insn, op, reg)
2155 unsigned int insn;
2156 const struct sh_opcode *op;
2157 unsigned int reg;
2158 {
2159 unsigned int f;
2160
2161 f = op->flags;
2162
2163 if ((f & USES1) != 0
2164 && USES1_REG (insn) == reg)
2165 return true;
2166 if ((f & USES2) != 0
2167 && USES2_REG (insn) == reg)
2168 return true;
2169 if ((f & USESR0) != 0
2170 && reg == 0)
2171 return true;
2172 if ((f & USESAS) && reg == USESAS_REG (insn))
2173 return true;
2174 if ((f & USESR8) && reg == 8)
2175 return true;
2176
2177 return false;
2178 }
2179
2180 /* See whether an instruction sets a general purpose register. */
2181
2182 static boolean
2183 sh_insn_sets_reg (insn, op, reg)
2184 unsigned int insn;
2185 const struct sh_opcode *op;
2186 unsigned int reg;
2187 {
2188 unsigned int f;
2189
2190 f = op->flags;
2191
2192 if ((f & SETS1) != 0
2193 && SETS1_REG (insn) == reg)
2194 return true;
2195 if ((f & SETS2) != 0
2196 && SETS2_REG (insn) == reg)
2197 return true;
2198 if ((f & SETSR0) != 0
2199 && reg == 0)
2200 return true;
2201 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2202 return true;
2203
2204 return false;
2205 }
2206
2207 /* See whether an instruction uses or sets a floating point register.  */
2208
2209 static boolean
2210 sh_insn_uses_or_sets_freg (insn, op, reg)
2211 unsigned int insn;
2212 const struct sh_opcode *op;
2213 unsigned int reg;
2214 {
2215 if (sh_insn_uses_freg (insn, op, reg))
2216 return true;
2217
2218 return sh_insn_sets_freg (insn, op, reg);
2219 }
2220
2221 /* See whether an instruction uses a floating point register. */
2222
2223 static boolean
2224 sh_insn_uses_freg (insn, op, freg)
2225 unsigned int insn;
2226 const struct sh_opcode *op;
2227 unsigned int freg;
2228 {
2229 unsigned int f;
2230
2231 f = op->flags;
2232
2233 /* We can't tell if this is a double-precision insn, so just play safe
2234    and assume that it might be. That means we not only have to test FREG
2235    against itself, but also an even FREG against FREG+1 - in case the
2236    using insn uses just the low part of a double precision value - and
2237    an odd FREG against FREG-1 - in case the setting insn sets just the
2238    low part of a double precision value.
2239    What this all boils down to is that we have to ignore the lowest
2240    bit of the register number. */
2241
2242 if ((f & USESF1) != 0
2243 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2244 return true;
2245 if ((f & USESF2) != 0
2246 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2247 return true;
2248 if ((f & USESF0) != 0
2249 && freg == 0)
2250 return true;
2251
2252 return false;
2253 }
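
/* A worked example of the rule above (purely illustrative): for
   fadd f2,f4 (insn 0xf420) the n field is 4 and the m field is 2, so
   sh_insn_uses_freg reports a use of f4 and f5 (4 & 0xe) as well as of
   f2 and f3 (2 & 0xe), in case the instruction is really the
   double-precision form operating on register pairs.  */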
2254
2255 /* See whether an instruction sets a floating point register. */
2256
2257 static boolean
2258 sh_insn_sets_freg (insn, op, freg)
2259 unsigned int insn;
2260 const struct sh_opcode *op;
2261 unsigned int freg;
2262 {
2263 unsigned int f;
2264
2265 f = op->flags;
2266
2267 /* We can't tell if this is a double-precision insn, so just play safe
2268    and assume that it might be. That means we not only have to test FREG
2269    against itself, but also an even FREG against FREG+1 - in case the
2270    using insn uses just the low part of a double precision value - and
2271    an odd FREG against FREG-1 - in case the setting insn sets just the
2272    low part of a double precision value.
2273    What this all boils down to is that we have to ignore the lowest
2274    bit of the register number. */
2275
2276 if ((f & SETSF1) != 0
2277 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2278 return true;
2279
2280 return false;
2281 }
2282
2283 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2284 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2285 This should return true if there is a conflict, or false if the
2286 instructions can be swapped safely. */
2287
2288 static boolean
2289 sh_insns_conflict (i1, op1, i2, op2)
2290 unsigned int i1;
2291 const struct sh_opcode *op1;
2292 unsigned int i2;
2293 const struct sh_opcode *op2;
2294 {
2295 unsigned int f1, f2;
2296
2297 f1 = op1->flags;
2298 f2 = op2->flags;
2299
2300 /* Load of fpscr conflicts with floating point operations.
2301 FIXME: shouldn't test raw opcodes here. */
2302 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2303 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2304 return true;
2305
2306 if ((f1 & (BRANCH | DELAY)) != 0
2307 || (f2 & (BRANCH | DELAY)) != 0)
2308 return true;
2309
2310 if (((f1 | f2) & SETSSP)
2311 && (f1 & (SETSSP | USESSP))
2312 && (f2 & (SETSSP | USESSP)))
2313 return true;
2314
2315 if ((f1 & SETS1) != 0
2316 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2317 return true;
2318 if ((f1 & SETS2) != 0
2319 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2320 return true;
2321 if ((f1 & SETSR0) != 0
2322 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2323 return true;
2324 if ((f1 & SETSAS)
2325 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2326 return true;
2327 if ((f1 & SETSF1) != 0
2328 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2329 return true;
2330
2331 if ((f2 & SETS1) != 0
2332 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2333 return true;
2334 if ((f2 & SETS2) != 0
2335 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2336 return true;
2337 if ((f2 & SETSR0) != 0
2338 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2339 return true;
2340 if ((f2 & SETSAS)
2341 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2342 return true;
2343 if ((f2 & SETSF1) != 0
2344 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2345 return true;
2346
2347 /* The instructions do not conflict. */
2348 return false;
2349 }
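
/* Illustrative example (hypothetical instruction pair): mov.l @r1,r2
   (0x6212) sets r2, and add r2,r3 (0x332c) uses r2, so
   sh_insns_conflict reports a conflict and the pair will not be
   swapped.  mov.l @r1,r2 followed by add r4,r5 (0x354c), on the other
   hand, does not conflict.  */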
2350
2351 /* I1 is a load instruction, and I2 is some other instruction. Return
2352 true if I1 loads a register which I2 uses. */
2353
2354 static boolean
2355 sh_load_use (i1, op1, i2, op2)
2356 unsigned int i1;
2357 const struct sh_opcode *op1;
2358 unsigned int i2;
2359 const struct sh_opcode *op2;
2360 {
2361 unsigned int f1;
2362
2363 f1 = op1->flags;
2364
2365 if ((f1 & LOAD) == 0)
2366 return false;
2367
2368 /* If both SETS1 and SETSSP are set, that means a load to a special
2369 register using postincrement addressing mode, which we don't care
2370 about here. */
2371 if ((f1 & SETS1) != 0
2372 && (f1 & SETSSP) == 0
2373 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2374 return true;
2375
2376 if ((f1 & SETSR0) != 0
2377 && sh_insn_uses_reg (i2, op2, 0))
2378 return true;
2379
2380 if ((f1 & SETSF1) != 0
2381 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2382 return true;
2383
2384 return false;
2385 }
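
/* For instance (hypothetical pair), if I1 is mov.l @r1,r2 (0x6212) and
   I2 is add r2,r3 (0x332c), the load sets r2 and the add uses it, so
   sh_load_use returns true: scheduling the add directly after the load
   would cause a pipeline bubble.  */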
2386
2387 #ifndef COFF_IMAGE_WITH_PE
2388 /* Try to align loads and stores within a span of memory. This is
2389 called by both the ELF and the COFF sh targets. ABFD and SEC are
2390 the BFD and section we are examining. CONTENTS is the contents of
2391 the section. SWAP is the routine to call to swap two instructions.
2392 RELOCS is a pointer to the internal relocation information, to be
2393 passed to SWAP. PLABEL is a pointer to the current label in a
2394 sorted list of labels; LABEL_END is the end of the list. START and
2395 STOP are the range of memory to examine. If a swap is made,
2396 *PSWAPPED is set to true. */
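
/* As a sketch of what the swap achieves (hypothetical code sequence):

       add    r1,r2		! at a four byte boundary
       mov.l  @r3,r4		! misaligned load, two bytes later

   Provided the two instructions do not conflict and no label falls
   between them, the SWAP callback is invoked on the address of the
   first instruction, leaving the load on a four byte boundary.  */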
2397
2398 boolean
2399 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2400 plabel, label_end, start, stop, pswapped)
2401 bfd *abfd;
2402 asection *sec;
2403 bfd_byte *contents;
2404 boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2405 PTR relocs;
2406 bfd_vma **plabel;
2407 bfd_vma *label_end;
2408 bfd_vma start;
2409 bfd_vma stop;
2410 boolean *pswapped;
2411 {
2412 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2413 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2414 bfd_vma i;
2415
2416 /* The SH4 has a Harvard architecture, hence aligning loads is not
2417 desirable. In fact, it is counter-productive, since it interferes
2418 with the schedules generated by the compiler. */
2419 if (abfd->arch_info->mach == bfd_mach_sh4)
2420 return true;
2421
2422 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2423 instructions. */
2424 if (dsp)
2425 {
2426 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2427 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2428 }
2429
2430 /* Instructions should be aligned on 2 byte boundaries. */
2431 if ((start & 1) == 1)
2432 ++start;
2433
2434 /* Now look through the unaligned addresses. */
2435 i = start;
2436 if ((i & 2) == 0)
2437 i += 2;
2438 for (; i < stop; i += 4)
2439 {
2440 unsigned int insn;
2441 const struct sh_opcode *op;
2442 unsigned int prev_insn = 0;
2443 const struct sh_opcode *prev_op = NULL;
2444
2445 insn = bfd_get_16 (abfd, contents + i);
2446 op = sh_insn_info (insn);
2447 if (op == NULL
2448 || (op->flags & (LOAD | STORE)) == 0)
2449 continue;
2450
2451 /* This is a load or store which is not on a four byte boundary. */
2452
2453 while (*plabel < label_end && **plabel < i)
2454 ++*plabel;
2455
2456 if (i > start)
2457 {
2458 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2459 /* If INSN is the field b of a parallel processing insn, it is not
2460 a load / store after all. Note that the test here might mistake
2461 the field_b of a pcopy insn for the starting code of a parallel
2462 processing insn; this might miss a swapping opportunity, but at
2463 least we're on the safe side. */
2464 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2465 continue;
2466
2467 /* Check if prev_insn is actually the field b of a parallel
2468 processing insn. Again, this can give a spurious match
2469 after a pcopy. */
2470 if (dsp && i - 2 > start)
2471 {
2472 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2473
2474 if ((pprev_insn & 0xfc00) == 0xf800)
2475 prev_op = NULL;
2476 else
2477 prev_op = sh_insn_info (prev_insn);
2478 }
2479 else
2480 prev_op = sh_insn_info (prev_insn);
2481
2482 /* If the load/store instruction is in a delay slot, we
2483 can't swap. */
2484 if (prev_op == NULL
2485 || (prev_op->flags & DELAY) != 0)
2486 continue;
2487 }
2488 if (i > start
2489 && (*plabel >= label_end || **plabel != i)
2490 && prev_op != NULL
2491 && (prev_op->flags & (LOAD | STORE)) == 0
2492 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2493 {
2494 boolean ok;
2495
2496 /* The load/store instruction does not have a label, and
2497 there is a previous instruction; PREV_INSN is not
2498 itself a load/store instruction, and PREV_INSN and
2499 INSN do not conflict. */
2500
2501 ok = true;
2502
2503 if (i >= start + 4)
2504 {
2505 unsigned int prev2_insn;
2506 const struct sh_opcode *prev2_op;
2507
2508 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2509 prev2_op = sh_insn_info (prev2_insn);
2510
2511 /* If the instruction before PREV_INSN has a delay
2512 slot--that is, PREV_INSN is in a delay slot--we
2513 can not swap. */
2514 if (prev2_op == NULL
2515 || (prev2_op->flags & DELAY) != 0)
2516 ok = false;
2517
2518 /* If the instruction before PREV_INSN is a load,
2519 and it sets a register which INSN uses, then
2520 putting INSN immediately after PREV_INSN will
2521 cause a pipeline bubble, so there is no point to
2522 making the swap. */
2523 if (ok
2524 && (prev2_op->flags & LOAD) != 0
2525 && sh_load_use (prev2_insn, prev2_op, insn, op))
2526 ok = false;
2527 }
2528
2529 if (ok)
2530 {
2531 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2532 return false;
2533 *pswapped = true;
2534 continue;
2535 }
2536 }
2537
2538 while (*plabel < label_end && **plabel < i + 2)
2539 ++*plabel;
2540
2541 if (i + 2 < stop
2542 && (*plabel >= label_end || **plabel != i + 2))
2543 {
2544 unsigned int next_insn;
2545 const struct sh_opcode *next_op;
2546
2547 /* There is an instruction after the load/store
2548 instruction, and it does not have a label. */
2549 next_insn = bfd_get_16 (abfd, contents + i + 2);
2550 next_op = sh_insn_info (next_insn);
2551 if (next_op != NULL
2552 && (next_op->flags & (LOAD | STORE)) == 0
2553 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2554 {
2555 boolean ok;
2556
2557 /* NEXT_INSN is not itself a load/store instruction,
2558 and it does not conflict with INSN. */
2559
2560 ok = true;
2561
2562 /* If PREV_INSN is a load, and it sets a register
2563 which NEXT_INSN uses, then putting NEXT_INSN
2564 immediately after PREV_INSN will cause a pipeline
2565 bubble, so there is no reason to make this swap. */
2566 if (prev_op != NULL
2567 && (prev_op->flags & LOAD) != 0
2568 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2569 ok = false;
2570
2571 /* If INSN is a load, and it sets a register which
2572 the insn after NEXT_INSN uses, then doing the
2573 swap will cause a pipeline bubble, so there is no
2574 reason to make the swap. However, if the insn
2575 after NEXT_INSN is itself a load or store
2576 instruction, then it is misaligned, so
2577 optimistically hope that it will be swapped
2578 itself, and just live with the pipeline bubble if
2579 it isn't. */
2580 if (ok
2581 && i + 4 < stop
2582 && (op->flags & LOAD) != 0)
2583 {
2584 unsigned int next2_insn;
2585 const struct sh_opcode *next2_op;
2586
2587 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2588 next2_op = sh_insn_info (next2_insn);
2589 if ((next2_op->flags & (LOAD | STORE)) == 0
2590 && sh_load_use (insn, op, next2_insn, next2_op))
2591 ok = false;
2592 }
2593
2594 if (ok)
2595 {
2596 if (! (*swap) (abfd, sec, relocs, contents, i))
2597 return false;
2598 *pswapped = true;
2599 continue;
2600 }
2601 }
2602 }
2603 }
2604
2605 return true;
2606 }
2607 #endif
2608
2609 /* Look for loads and stores which we can align to four byte
2610 boundaries. See the longer comment above sh_relax_section for why
2611 this is desirable. This sets *PSWAPPED if some instruction was
2612 swapped. */
2613
2614 static boolean
2615 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2616 bfd *abfd;
2617 asection *sec;
2618 struct internal_reloc *internal_relocs;
2619 bfd_byte *contents;
2620 boolean *pswapped;
2621 {
2622 struct internal_reloc *irel, *irelend;
2623 bfd_vma *labels = NULL;
2624 bfd_vma *label, *label_end;
2625
2626 *pswapped = false;
2627
2628 irelend = internal_relocs + sec->reloc_count;
2629
2630 /* Get all the addresses with labels on them. */
2631 labels = (bfd_vma *) bfd_malloc (sec->reloc_count * sizeof (bfd_vma));
2632 if (labels == NULL)
2633 goto error_return;
2634 label_end = labels;
2635 for (irel = internal_relocs; irel < irelend; irel++)
2636 {
2637 if (irel->r_type == R_SH_LABEL)
2638 {
2639 *label_end = irel->r_vaddr - sec->vma;
2640 ++label_end;
2641 }
2642 }
2643
2644 /* Note that the assembler currently always outputs relocs in
2645 address order. If that ever changes, this code will need to sort
2646 the label values and the relocs. */
2647
2648 label = labels;
2649
2650 for (irel = internal_relocs; irel < irelend; irel++)
2651 {
2652 bfd_vma start, stop;
2653
2654 if (irel->r_type != R_SH_CODE)
2655 continue;
2656
2657 start = irel->r_vaddr - sec->vma;
2658
2659 for (irel++; irel < irelend; irel++)
2660 if (irel->r_type == R_SH_DATA)
2661 break;
2662 if (irel < irelend)
2663 stop = irel->r_vaddr - sec->vma;
2664 else
2665 stop = sec->_cooked_size;
2666
2667 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2668 (PTR) internal_relocs, &label,
2669 label_end, start, stop, pswapped))
2670 goto error_return;
2671 }
2672
2673 free (labels);
2674
2675 return true;
2676
2677 error_return:
2678 if (labels != NULL)
2679 free (labels);
2680 return false;
2681 }
2682
2683 /* Swap two SH instructions. */
2684
2685 static boolean
2686 sh_swap_insns (abfd, sec, relocs, contents, addr)
2687 bfd *abfd;
2688 asection *sec;
2689 PTR relocs;
2690 bfd_byte *contents;
2691 bfd_vma addr;
2692 {
2693 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2694 unsigned short i1, i2;
2695 struct internal_reloc *irel, *irelend;
2696
2697 /* Swap the instructions themselves. */
2698 i1 = bfd_get_16 (abfd, contents + addr);
2699 i2 = bfd_get_16 (abfd, contents + addr + 2);
2700 bfd_put_16 (abfd, i2, contents + addr);
2701 bfd_put_16 (abfd, i1, contents + addr + 2);
2702
2703 /* Adjust all reloc addresses. */
2704 irelend = internal_relocs + sec->reloc_count;
2705 for (irel = internal_relocs; irel < irelend; irel++)
2706 {
2707 int type, add;
2708
2709 /* There are a few special types of relocs that we don't want to
2710 adjust. These relocs do not apply to the instruction itself,
2711 but are only associated with the address. */
2712 type = irel->r_type;
2713 if (type == R_SH_ALIGN
2714 || type == R_SH_CODE
2715 || type == R_SH_DATA
2716 || type == R_SH_LABEL)
2717 continue;
2718
2719 /* If an R_SH_USES reloc points to one of the addresses being
2720 swapped, we must adjust it. It would be incorrect to do this
2721 for a jump, though, since we want to execute both
2722 instructions after the jump. (We have avoided swapping
2723 around a label, so the jump will not wind up executing an
2724 instruction it shouldn't). */
2725 if (type == R_SH_USES)
2726 {
2727 bfd_vma off;
2728
2729 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2730 if (off == addr)
2731 irel->r_offset += 2;
2732 else if (off == addr + 2)
2733 irel->r_offset -= 2;
2734 }
2735
2736 if (irel->r_vaddr - sec->vma == addr)
2737 {
2738 irel->r_vaddr += 2;
2739 add = -2;
2740 }
2741 else if (irel->r_vaddr - sec->vma == addr + 2)
2742 {
2743 irel->r_vaddr -= 2;
2744 add = 2;
2745 }
2746 else
2747 add = 0;
2748
2749 if (add != 0)
2750 {
2751 bfd_byte *loc;
2752 unsigned short insn, oinsn;
2753 boolean overflow;
2754
2755 loc = contents + irel->r_vaddr - sec->vma;
2756 overflow = false;
2757 switch (type)
2758 {
2759 default:
2760 break;
2761
2762 case R_SH_PCDISP8BY2:
2763 case R_SH_PCRELIMM8BY2:
2764 insn = bfd_get_16 (abfd, loc);
2765 oinsn = insn;
2766 insn += add / 2;
2767 if ((oinsn & 0xff00) != (insn & 0xff00))
2768 overflow = true;
2769 bfd_put_16 (abfd, insn, loc);
2770 break;
2771
2772 case R_SH_PCDISP:
2773 insn = bfd_get_16 (abfd, loc);
2774 oinsn = insn;
2775 insn += add / 2;
2776 if ((oinsn & 0xf000) != (insn & 0xf000))
2777 overflow = true;
2778 bfd_put_16 (abfd, insn, loc);
2779 break;
2780
2781 case R_SH_PCRELIMM8BY4:
2782 /* This reloc ignores the least significant 2 bits of
2783 the program counter before adding in the offset.
2784 This means that if ADDR is at an even address, the
2785 swap will not affect the offset. If ADDR is at an
2786 odd address, then the instruction will be crossing a
2787 four byte boundary, and must be adjusted. */
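/* For example (hypothetical offsets): if ADDR is 0x102, the two
   swapped instructions straddle a four byte boundary, so a mova or
   mov.l @(disp,pc) moved between 0x102 and 0x104 sees its PC base
   change by four bytes, and its longword displacement must change
   by ADD / 2, i.e. by one.  */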
2788 if ((addr & 3) != 0)
2789 {
2790 insn = bfd_get_16 (abfd, loc);
2791 oinsn = insn;
2792 insn += add / 2;
2793 if ((oinsn & 0xff00) != (insn & 0xff00))
2794 overflow = true;
2795 bfd_put_16 (abfd, insn, loc);
2796 }
2797
2798 break;
2799 }
2800
2801 if (overflow)
2802 {
2803 ((*_bfd_error_handler)
2804 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2805 bfd_get_filename (abfd), (unsigned long) irel->r_vaddr));
2806 bfd_set_error (bfd_error_bad_value);
2807 return false;
2808 }
2809 }
2810 }
2811
2812 return true;
2813 }
2814 \f
2815 /* This is a modification of _bfd_coff_generic_relocate_section, which
2816 will handle SH relaxing. */
2817
2818 static boolean
2819 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2820 relocs, syms, sections)
2821 bfd *output_bfd ATTRIBUTE_UNUSED;
2822 struct bfd_link_info *info;
2823 bfd *input_bfd;
2824 asection *input_section;
2825 bfd_byte *contents;
2826 struct internal_reloc *relocs;
2827 struct internal_syment *syms;
2828 asection **sections;
2829 {
2830 struct internal_reloc *rel;
2831 struct internal_reloc *relend;
2832
2833 rel = relocs;
2834 relend = rel + input_section->reloc_count;
2835 for (; rel < relend; rel++)
2836 {
2837 long symndx;
2838 struct coff_link_hash_entry *h;
2839 struct internal_syment *sym;
2840 bfd_vma addend;
2841 bfd_vma val;
2842 reloc_howto_type *howto;
2843 bfd_reloc_status_type rstat;
2844
2845 /* Almost all relocs have to do with relaxing. If any work must
2846 be done for them, it has been done in sh_relax_section. */
2847 if (rel->r_type != R_SH_IMM32
2848 #ifdef COFF_WITH_PE
2849 && rel->r_type != R_SH_IMM32CE
2850 && rel->r_type != R_SH_IMAGEBASE
2851 #endif
2852 && rel->r_type != R_SH_PCDISP)
2853 continue;
2854
2855 symndx = rel->r_symndx;
2856
2857 if (symndx == -1)
2858 {
2859 h = NULL;
2860 sym = NULL;
2861 }
2862 else
2863 {
2864 if (symndx < 0
2865 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2866 {
2867 (*_bfd_error_handler)
2868 ("%s: illegal symbol index %ld in relocs",
2869 bfd_get_filename (input_bfd), symndx);
2870 bfd_set_error (bfd_error_bad_value);
2871 return false;
2872 }
2873 h = obj_coff_sym_hashes (input_bfd)[symndx];
2874 sym = syms + symndx;
2875 }
2876
2877 if (sym != NULL && sym->n_scnum != 0)
2878 addend = - sym->n_value;
2879 else
2880 addend = 0;
2881
2882 if (rel->r_type == R_SH_PCDISP)
2883 addend -= 4;
2884
2885 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2886 howto = NULL;
2887 else
2888 howto = &sh_coff_howtos[rel->r_type];
2889
2890 if (howto == NULL)
2891 {
2892 bfd_set_error (bfd_error_bad_value);
2893 return false;
2894 }
2895
2896 #ifdef COFF_WITH_PE
2897 if (rel->r_type == R_SH_IMAGEBASE)
2898 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2899 #endif
2900
2901 val = 0;
2902
2903 if (h == NULL)
2904 {
2905 asection *sec;
2906
2907 /* There is nothing to do for an internal PCDISP reloc. */
2908 if (rel->r_type == R_SH_PCDISP)
2909 continue;
2910
2911 if (symndx == -1)
2912 {
2913 sec = bfd_abs_section_ptr;
2914 val = 0;
2915 }
2916 else
2917 {
2918 sec = sections[symndx];
2919 val = (sec->output_section->vma
2920 + sec->output_offset
2921 + sym->n_value
2922 - sec->vma);
2923 }
2924 }
2925 else
2926 {
2927 if (h->root.type == bfd_link_hash_defined
2928 || h->root.type == bfd_link_hash_defweak)
2929 {
2930 asection *sec;
2931
2932 sec = h->root.u.def.section;
2933 val = (h->root.u.def.value
2934 + sec->output_section->vma
2935 + sec->output_offset);
2936 }
2937 else if (! info->relocateable)
2938 {
2939 if (! ((*info->callbacks->undefined_symbol)
2940 (info, h->root.root.string, input_bfd, input_section,
2941 rel->r_vaddr - input_section->vma)))
2942 return false;
2943 }
2944 }
2945
2946 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2947 contents,
2948 rel->r_vaddr - input_section->vma,
2949 val, addend);
2950
2951 switch (rstat)
2952 {
2953 default:
2954 abort ();
2955 case bfd_reloc_ok:
2956 break;
2957 case bfd_reloc_overflow:
2958 {
2959 const char *name;
2960 char buf[SYMNMLEN + 1];
2961
2962 if (symndx == -1)
2963 name = "*ABS*";
2964 else if (h != NULL)
2965 name = h->root.root.string;
2966 else if (sym->_n._n_n._n_zeroes == 0
2967 && sym->_n._n_n._n_offset != 0)
2968 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2969 else
2970 {
2971 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2972 buf[SYMNMLEN] = '\0';
2973 name = buf;
2974 }
2975
2976 if (! ((*info->callbacks->reloc_overflow)
2977 (info, name, howto->name, (bfd_vma) 0, input_bfd,
2978 input_section, rel->r_vaddr - input_section->vma)))
2979 return false;
2980 }
2981 }
2982 }
2983
2984 return true;
2985 }
2986
2987 /* This is a version of bfd_generic_get_relocated_section_contents
2988 which uses sh_relocate_section. */
2989
2990 static bfd_byte *
2991 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2992 data, relocateable, symbols)
2993 bfd *output_bfd;
2994 struct bfd_link_info *link_info;
2995 struct bfd_link_order *link_order;
2996 bfd_byte *data;
2997 boolean relocateable;
2998 asymbol **symbols;
2999 {
3000 asection *input_section = link_order->u.indirect.section;
3001 bfd *input_bfd = input_section->owner;
3002 asection **sections = NULL;
3003 struct internal_reloc *internal_relocs = NULL;
3004 struct internal_syment *internal_syms = NULL;
3005
3006 /* We only need to handle the case of relaxing, or of having a
3007 particular set of section contents, specially. */
3008 if (relocateable
3009 || coff_section_data (input_bfd, input_section) == NULL
3010 || coff_section_data (input_bfd, input_section)->contents == NULL)
3011 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
3012 link_order, data,
3013 relocateable,
3014 symbols);
3015
3016 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
3017 input_section->_raw_size);
3018
3019 if ((input_section->flags & SEC_RELOC) != 0
3020 && input_section->reloc_count > 0)
3021 {
3022 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
3023 bfd_byte *esym, *esymend;
3024 struct internal_syment *isymp;
3025 asection **secpp;
3026
3027 if (! _bfd_coff_get_external_symbols (input_bfd))
3028 goto error_return;
3029
3030 internal_relocs = (_bfd_coff_read_internal_relocs
3031 (input_bfd, input_section, false, (bfd_byte *) NULL,
3032 false, (struct internal_reloc *) NULL));
3033 if (internal_relocs == NULL)
3034 goto error_return;
3035
3036 internal_syms = ((struct internal_syment *)
3037 bfd_malloc (obj_raw_syment_count (input_bfd)
3038 * sizeof (struct internal_syment)));
3039 if (internal_syms == NULL)
3040 goto error_return;
3041
3042 sections = (asection **) bfd_malloc (obj_raw_syment_count (input_bfd)
3043 * sizeof (asection *));
3044 if (sections == NULL)
3045 goto error_return;
3046
3047 isymp = internal_syms;
3048 secpp = sections;
3049 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3050 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3051 while (esym < esymend)
3052 {
3053 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3054
3055 if (isymp->n_scnum != 0)
3056 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3057 else
3058 {
3059 if (isymp->n_value == 0)
3060 *secpp = bfd_und_section_ptr;
3061 else
3062 *secpp = bfd_com_section_ptr;
3063 }
3064
3065 esym += (isymp->n_numaux + 1) * symesz;
3066 secpp += isymp->n_numaux + 1;
3067 isymp += isymp->n_numaux + 1;
3068 }
3069
3070 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3071 input_section, data, internal_relocs,
3072 internal_syms, sections))
3073 goto error_return;
3074
3075 free (sections);
3076 sections = NULL;
3077 free (internal_syms);
3078 internal_syms = NULL;
3079 free (internal_relocs);
3080 internal_relocs = NULL;
3081 }
3082
3083 return data;
3084
3085 error_return:
3086 if (internal_relocs != NULL)
3087 free (internal_relocs);
3088 if (internal_syms != NULL)
3089 free (internal_syms);
3090 if (sections != NULL)
3091 free (sections);
3092 return NULL;
3093 }
3094
3095 /* The target vectors. */
3096
3097 #ifndef TARGET_SHL_SYM
3098 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
3099 #endif
3100
3101 #ifdef TARGET_SHL_SYM
3102 #define TARGET_SYM TARGET_SHL_SYM
3103 #else
3104 #define TARGET_SYM shlcoff_vec
3105 #endif
3106
3107 #ifndef TARGET_SHL_NAME
3108 #define TARGET_SHL_NAME "coff-shl"
3109 #endif
3110
3111 #ifdef COFF_WITH_PE
3112 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3113 SEC_CODE | SEC_DATA, '_', NULL);
3114 #else
3115 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE, 0, '_', NULL)
3116 #endif
3117
3118 #ifndef TARGET_SHL_SYM
3119 /* Some people want versions of the SH COFF target which do not align
3120 to 16 byte boundaries. We implement that by adding a couple of new
3121 target vectors. These are just like the ones above, but they
3122 change the default section alignment. To generate them in the
3123 assembler, use -small. To use them in the linker, use -b
3124 coff-sh{l}-small and -oformat coff-sh{l}-small.
3125
3126 Yes, this is a horrible hack. A general solution for setting
3127 section alignment in COFF is rather complex. ELF handles this
3128 correctly. */
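
/* Purely as an illustration (the tool names are hypothetical and depend
   on how the toolchain was configured), generating and linking the
   small variants might look like:

       sh-coff-as -small -o foo.o foo.s
       sh-coff-ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o  */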
3129
3130 /* Only recognize the small versions if the target was not defaulted.
3131 Otherwise we won't recognize the non default endianness. */
3132
3133 static const bfd_target *
3134 coff_small_object_p (abfd)
3135 bfd *abfd;
3136 {
3137 if (abfd->target_defaulted)
3138 {
3139 bfd_set_error (bfd_error_wrong_format);
3140 return NULL;
3141 }
3142 return coff_object_p (abfd);
3143 }
3144
3145 /* Set the section alignment for the small versions. */
3146
3147 static boolean
3148 coff_small_new_section_hook (abfd, section)
3149 bfd *abfd;
3150 asection *section;
3151 {
3152 if (! coff_new_section_hook (abfd, section))
3153 return false;
3154
3155 /* We must align to at least a four byte boundary, because longword
3156 accesses must be on a four byte boundary. */
3157 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3158 section->alignment_power = 2;
3159
3160 return true;
3161 }
3162
3163 /* This is copied from bfd_coff_std_swap_table so that we can change
3164 the default section alignment power. */
3165
3166 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3167 {
3168 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3169 coff_swap_aux_out, coff_swap_sym_out,
3170 coff_swap_lineno_out, coff_swap_reloc_out,
3171 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3172 coff_swap_scnhdr_out,
3173 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3174 #ifdef COFF_LONG_FILENAMES
3175 true,
3176 #else
3177 false,
3178 #endif
3179 #ifdef COFF_LONG_SECTION_NAMES
3180 true,
3181 #else
3182 false,
3183 #endif
3184 2,
3185 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3186 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3187 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3188 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3189 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3190 coff_classify_symbol, coff_compute_section_file_positions,
3191 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3192 coff_adjust_symndx, coff_link_add_one_symbol,
3193 coff_link_output_has_begun, coff_final_link_postscript
3194 };
3195
3196 #define coff_small_close_and_cleanup \
3197 coff_close_and_cleanup
3198 #define coff_small_bfd_free_cached_info \
3199 coff_bfd_free_cached_info
3200 #define coff_small_get_section_contents \
3201 coff_get_section_contents
3202 #define coff_small_get_section_contents_in_window \
3203 coff_get_section_contents_in_window
3204
3205 extern const bfd_target shlcoff_small_vec;
3206
3207 const bfd_target shcoff_small_vec =
3208 {
3209 "coff-sh-small", /* name */
3210 bfd_target_coff_flavour,
3211 BFD_ENDIAN_BIG, /* data byte order is big */
3212 BFD_ENDIAN_BIG, /* header byte order is big */
3213
3214 (HAS_RELOC | EXEC_P | /* object flags */
3215 HAS_LINENO | HAS_DEBUG |
3216 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3217
3218 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3219 '_', /* leading symbol underscore */
3220 '/', /* ar_pad_char */
3221 15, /* ar_max_namelen */
3222 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3223 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3224 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3225 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3226 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3227 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3228
3229 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3230 bfd_generic_archive_p, _bfd_dummy_target},
3231 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3232 bfd_false},
3233 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3234 _bfd_write_archive_contents, bfd_false},
3235
3236 BFD_JUMP_TABLE_GENERIC (coff_small),
3237 BFD_JUMP_TABLE_COPY (coff),
3238 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3239 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3240 BFD_JUMP_TABLE_SYMBOLS (coff),
3241 BFD_JUMP_TABLE_RELOCS (coff),
3242 BFD_JUMP_TABLE_WRITE (coff),
3243 BFD_JUMP_TABLE_LINK (coff),
3244 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3245
3246 & shlcoff_small_vec,
3247
3248 (PTR) &bfd_coff_small_swap_table
3249 };
3250
3251 const bfd_target shlcoff_small_vec =
3252 {
3253 "coff-shl-small", /* name */
3254 bfd_target_coff_flavour,
3255 BFD_ENDIAN_LITTLE, /* data byte order is little */
3256 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3257
3258 (HAS_RELOC | EXEC_P | /* object flags */
3259 HAS_LINENO | HAS_DEBUG |
3260 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3261
3262 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3263 '_', /* leading symbol underscore */
3264 '/', /* ar_pad_char */
3265 15, /* ar_max_namelen */
3266 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3267 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3268 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3269 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3270 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3271 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3272
3273 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3274 bfd_generic_archive_p, _bfd_dummy_target},
3275 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3276 bfd_false},
3277 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3278 _bfd_write_archive_contents, bfd_false},
3279
3280 BFD_JUMP_TABLE_GENERIC (coff_small),
3281 BFD_JUMP_TABLE_COPY (coff),
3282 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3283 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3284 BFD_JUMP_TABLE_SYMBOLS (coff),
3285 BFD_JUMP_TABLE_RELOCS (coff),
3286 BFD_JUMP_TABLE_WRITE (coff),
3287 BFD_JUMP_TABLE_LINK (coff),
3288 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3289
3290 & shcoff_small_vec,
3291
3292 (PTR) &bfd_coff_small_swap_table
3293 };
3294 #endif