1 /* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7
8 This file is part of BFD, the Binary File Descriptor library.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
23 MA 02110-1301, USA. */
24
25 #include "sysdep.h"
26 #include "bfd.h"
27 #include "libiberty.h"
28 #include "libbfd.h"
29 #include "bfdlink.h"
30 #include "coff/sh.h"
31 #include "coff/internal.h"
32
33 #undef bfd_pe_print_pdata
34
35 #ifdef COFF_WITH_PE
36 #include "coff/pe.h"
37
38 #ifndef COFF_IMAGE_WITH_PE
39 static bfd_boolean sh_align_load_span
40 PARAMS ((bfd *, asection *, bfd_byte *,
41 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
42 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
43
44 #define _bfd_sh_align_load_span sh_align_load_span
45 #endif
46
47 #define bfd_pe_print_pdata _bfd_pe_print_ce_compressed_pdata
48
49 #else
50
51 #define bfd_pe_print_pdata NULL
52
53 #endif /* COFF_WITH_PE. */
54
55 #include "libcoff.h"
56
57 /* Internal functions. */
58 static bfd_reloc_status_type sh_reloc
59 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
60 static long get_symbol_value PARAMS ((asymbol *));
61 static bfd_boolean sh_relax_section
62 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
63 static bfd_boolean sh_relax_delete_bytes
64 PARAMS ((bfd *, asection *, bfd_vma, int));
65 #ifndef COFF_IMAGE_WITH_PE
66 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
67 #endif
68 static bfd_boolean sh_align_loads
69 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
70 bfd_boolean *));
71 static bfd_boolean sh_swap_insns
72 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
73 static bfd_boolean sh_relocate_section
74 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
75 struct internal_reloc *, struct internal_syment *, asection **));
76 static bfd_byte *sh_coff_get_relocated_section_contents
77 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
78 bfd_byte *, bfd_boolean, asymbol **));
79 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
80
81 #ifdef COFF_WITH_PE
82 /* Can't build import tables with 2**4 alignment. */
83 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
84 #else
85 /* Default section alignment to 2**4. */
86 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
87 #endif
88
89 #ifdef COFF_IMAGE_WITH_PE
90 /* Align PE executables. */
91 #define COFF_PAGE_SIZE 0x1000
92 #endif
93
94 /* Generate long file names. */
95 #define COFF_LONG_FILENAMES
96
97 #ifdef COFF_WITH_PE
98 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
99 /* Return TRUE if this relocation should
100 appear in the output .reloc section. */
101 static bfd_boolean in_reloc_p (abfd, howto)
102 bfd * abfd ATTRIBUTE_UNUSED;
103 reloc_howto_type * howto;
104 {
105 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
106 }
107 #endif
108
109 /* The supported relocations. There are a lot of relocations defined
110 in coff/internal.h which we do not expect to ever see. */
111 static reloc_howto_type sh_coff_howtos[] =
112 {
113 EMPTY_HOWTO (0),
114 EMPTY_HOWTO (1),
115 #ifdef COFF_WITH_PE
116 /* Windows CE */
117 HOWTO (R_SH_IMM32CE, /* type */
118 0, /* rightshift */
119 2, /* size (0 = byte, 1 = short, 2 = long) */
120 32, /* bitsize */
121 FALSE, /* pc_relative */
122 0, /* bitpos */
123 complain_overflow_bitfield, /* complain_on_overflow */
124 sh_reloc, /* special_function */
125 "r_imm32ce", /* name */
126 TRUE, /* partial_inplace */
127 0xffffffff, /* src_mask */
128 0xffffffff, /* dst_mask */
129 FALSE), /* pcrel_offset */
130 #else
131 EMPTY_HOWTO (2),
132 #endif
133 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
134 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
135 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
136 EMPTY_HOWTO (6), /* R_SH_IMM24 */
137 EMPTY_HOWTO (7), /* R_SH_LOW16 */
138 EMPTY_HOWTO (8),
139 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
140
141 HOWTO (R_SH_PCDISP8BY2, /* type */
142 1, /* rightshift */
143 1, /* size (0 = byte, 1 = short, 2 = long) */
144 8, /* bitsize */
145 TRUE, /* pc_relative */
146 0, /* bitpos */
147 complain_overflow_signed, /* complain_on_overflow */
148 sh_reloc, /* special_function */
149 "r_pcdisp8by2", /* name */
150 TRUE, /* partial_inplace */
151 0xff, /* src_mask */
152 0xff, /* dst_mask */
153 TRUE), /* pcrel_offset */
154
155 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
156
157 HOWTO (R_SH_PCDISP, /* type */
158 1, /* rightshift */
159 1, /* size (0 = byte, 1 = short, 2 = long) */
160 12, /* bitsize */
161 TRUE, /* pc_relative */
162 0, /* bitpos */
163 complain_overflow_signed, /* complain_on_overflow */
164 sh_reloc, /* special_function */
165 "r_pcdisp12by2", /* name */
166 TRUE, /* partial_inplace */
167 0xfff, /* src_mask */
168 0xfff, /* dst_mask */
169 TRUE), /* pcrel_offset */
170
171 EMPTY_HOWTO (13),
172
173 HOWTO (R_SH_IMM32, /* type */
174 0, /* rightshift */
175 2, /* size (0 = byte, 1 = short, 2 = long) */
176 32, /* bitsize */
177 FALSE, /* pc_relative */
178 0, /* bitpos */
179 complain_overflow_bitfield, /* complain_on_overflow */
180 sh_reloc, /* special_function */
181 "r_imm32", /* name */
182 TRUE, /* partial_inplace */
183 0xffffffff, /* src_mask */
184 0xffffffff, /* dst_mask */
185 FALSE), /* pcrel_offset */
186
187 EMPTY_HOWTO (15),
188 #ifdef COFF_WITH_PE
189 HOWTO (R_SH_IMAGEBASE, /* type */
190 0, /* rightshift */
191 2, /* size (0 = byte, 1 = short, 2 = long) */
192 32, /* bitsize */
193 FALSE, /* pc_relative */
194 0, /* bitpos */
195 complain_overflow_bitfield, /* complain_on_overflow */
196 sh_reloc, /* special_function */
197 "rva32", /* name */
198 TRUE, /* partial_inplace */
199 0xffffffff, /* src_mask */
200 0xffffffff, /* dst_mask */
201 FALSE), /* pcrel_offset */
202 #else
203 EMPTY_HOWTO (16), /* R_SH_IMM8 */
204 #endif
205 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
206 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
207 EMPTY_HOWTO (19), /* R_SH_IMM4 */
208 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
209 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
210
211 HOWTO (R_SH_PCRELIMM8BY2, /* type */
212 1, /* rightshift */
213 1, /* size (0 = byte, 1 = short, 2 = long) */
214 8, /* bitsize */
215 TRUE, /* pc_relative */
216 0, /* bitpos */
217 complain_overflow_unsigned, /* complain_on_overflow */
218 sh_reloc, /* special_function */
219 "r_pcrelimm8by2", /* name */
220 TRUE, /* partial_inplace */
221 0xff, /* src_mask */
222 0xff, /* dst_mask */
223 TRUE), /* pcrel_offset */
224
225 HOWTO (R_SH_PCRELIMM8BY4, /* type */
226 2, /* rightshift */
227 1, /* size (0 = byte, 1 = short, 2 = long) */
228 8, /* bitsize */
229 TRUE, /* pc_relative */
230 0, /* bitpos */
231 complain_overflow_unsigned, /* complain_on_overflow */
232 sh_reloc, /* special_function */
233 "r_pcrelimm8by4", /* name */
234 TRUE, /* partial_inplace */
235 0xff, /* src_mask */
236 0xff, /* dst_mask */
237 TRUE), /* pcrel_offset */
238
239 HOWTO (R_SH_IMM16, /* type */
240 0, /* rightshift */
241 1, /* size (0 = byte, 1 = short, 2 = long) */
242 16, /* bitsize */
243 FALSE, /* pc_relative */
244 0, /* bitpos */
245 complain_overflow_bitfield, /* complain_on_overflow */
246 sh_reloc, /* special_function */
247 "r_imm16", /* name */
248 TRUE, /* partial_inplace */
249 0xffff, /* src_mask */
250 0xffff, /* dst_mask */
251 FALSE), /* pcrel_offset */
252
253 HOWTO (R_SH_SWITCH16, /* type */
254 0, /* rightshift */
255 1, /* size (0 = byte, 1 = short, 2 = long) */
256 16, /* bitsize */
257 FALSE, /* pc_relative */
258 0, /* bitpos */
259 complain_overflow_bitfield, /* complain_on_overflow */
260 sh_reloc, /* special_function */
261 "r_switch16", /* name */
262 TRUE, /* partial_inplace */
263 0xffff, /* src_mask */
264 0xffff, /* dst_mask */
265 FALSE), /* pcrel_offset */
266
267 HOWTO (R_SH_SWITCH32, /* type */
268 0, /* rightshift */
269 2, /* size (0 = byte, 1 = short, 2 = long) */
270 32, /* bitsize */
271 FALSE, /* pc_relative */
272 0, /* bitpos */
273 complain_overflow_bitfield, /* complain_on_overflow */
274 sh_reloc, /* special_function */
275 "r_switch32", /* name */
276 TRUE, /* partial_inplace */
277 0xffffffff, /* src_mask */
278 0xffffffff, /* dst_mask */
279 FALSE), /* pcrel_offset */
280
281 HOWTO (R_SH_USES, /* type */
282 0, /* rightshift */
283 1, /* size (0 = byte, 1 = short, 2 = long) */
284 16, /* bitsize */
285 FALSE, /* pc_relative */
286 0, /* bitpos */
287 complain_overflow_bitfield, /* complain_on_overflow */
288 sh_reloc, /* special_function */
289 "r_uses", /* name */
290 TRUE, /* partial_inplace */
291 0xffff, /* src_mask */
292 0xffff, /* dst_mask */
293 FALSE), /* pcrel_offset */
294
295 HOWTO (R_SH_COUNT, /* type */
296 0, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 32, /* bitsize */
299 FALSE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_bitfield, /* complain_on_overflow */
302 sh_reloc, /* special_function */
303 "r_count", /* name */
304 TRUE, /* partial_inplace */
305 0xffffffff, /* src_mask */
306 0xffffffff, /* dst_mask */
307 FALSE), /* pcrel_offset */
308
309 HOWTO (R_SH_ALIGN, /* type */
310 0, /* rightshift */
311 2, /* size (0 = byte, 1 = short, 2 = long) */
312 32, /* bitsize */
313 FALSE, /* pc_relative */
314 0, /* bitpos */
315 complain_overflow_bitfield, /* complain_on_overflow */
316 sh_reloc, /* special_function */
317 "r_align", /* name */
318 TRUE, /* partial_inplace */
319 0xffffffff, /* src_mask */
320 0xffffffff, /* dst_mask */
321 FALSE), /* pcrel_offset */
322
323 HOWTO (R_SH_CODE, /* type */
324 0, /* rightshift */
325 2, /* size (0 = byte, 1 = short, 2 = long) */
326 32, /* bitsize */
327 FALSE, /* pc_relative */
328 0, /* bitpos */
329 complain_overflow_bitfield, /* complain_on_overflow */
330 sh_reloc, /* special_function */
331 "r_code", /* name */
332 TRUE, /* partial_inplace */
333 0xffffffff, /* src_mask */
334 0xffffffff, /* dst_mask */
335 FALSE), /* pcrel_offset */
336
337 HOWTO (R_SH_DATA, /* type */
338 0, /* rightshift */
339 2, /* size (0 = byte, 1 = short, 2 = long) */
340 32, /* bitsize */
341 FALSE, /* pc_relative */
342 0, /* bitpos */
343 complain_overflow_bitfield, /* complain_on_overflow */
344 sh_reloc, /* special_function */
345 "r_data", /* name */
346 TRUE, /* partial_inplace */
347 0xffffffff, /* src_mask */
348 0xffffffff, /* dst_mask */
349 FALSE), /* pcrel_offset */
350
351 HOWTO (R_SH_LABEL, /* type */
352 0, /* rightshift */
353 2, /* size (0 = byte, 1 = short, 2 = long) */
354 32, /* bitsize */
355 FALSE, /* pc_relative */
356 0, /* bitpos */
357 complain_overflow_bitfield, /* complain_on_overflow */
358 sh_reloc, /* special_function */
359 "r_label", /* name */
360 TRUE, /* partial_inplace */
361 0xffffffff, /* src_mask */
362 0xffffffff, /* dst_mask */
363 FALSE), /* pcrel_offset */
364
365 HOWTO (R_SH_SWITCH8, /* type */
366 0, /* rightshift */
367 0, /* size (0 = byte, 1 = short, 2 = long) */
368 8, /* bitsize */
369 FALSE, /* pc_relative */
370 0, /* bitpos */
371 complain_overflow_bitfield, /* complain_on_overflow */
372 sh_reloc, /* special_function */
373 "r_switch8", /* name */
374 TRUE, /* partial_inplace */
375 0xff, /* src_mask */
376 0xff, /* dst_mask */
377 FALSE) /* pcrel_offset */
378 };
379
380 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
381
382 /* Check for a bad magic number. */
383 #define BADMAG(x) SHBADMAG(x)
384
385 /* Customize coffcode.h (this is not currently used). */
386 #define SH 1
387
388 /* FIXME: This should not be set here. */
389 #define __A_MAGIC_SET__
390
391 #ifndef COFF_WITH_PE
392 /* Swap the r_offset field in and out. */
393 #define SWAP_IN_RELOC_OFFSET H_GET_32
394 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
395
396 /* Swap out extra information in the reloc structure. */
397 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
398 do \
399 { \
400 dst->r_stuff[0] = 'S'; \
401 dst->r_stuff[1] = 'C'; \
402 } \
403 while (0)
404 #endif
405
406 /* Get the value of a symbol, when performing a relocation. */
407
408 static long
409 get_symbol_value (symbol)
410 asymbol *symbol;
411 {
412 bfd_vma relocation;
413
414 if (bfd_is_com_section (symbol->section))
415 relocation = 0;
416 else
417 relocation = (symbol->value +
418 symbol->section->output_section->vma +
419 symbol->section->output_offset);
420
421 return relocation;
422 }
423
424 #ifdef COFF_WITH_PE
425 /* Convert an rtype to howto for the COFF backend linker.
426 Copied from coff-i386. */
427 #define coff_rtype_to_howto coff_sh_rtype_to_howto
428 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
429
430 static reloc_howto_type *
431 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
432 bfd * abfd ATTRIBUTE_UNUSED;
433 asection * sec;
434 struct internal_reloc * rel;
435 struct coff_link_hash_entry * h;
436 struct internal_syment * sym;
437 bfd_vma * addendp;
438 {
439 reloc_howto_type * howto;
440
441 howto = sh_coff_howtos + rel->r_type;
442
443 *addendp = 0;
444
445 if (howto->pc_relative)
446 *addendp += sec->vma;
447
448 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
449 {
450 /* This is a common symbol. The section contents include the
451 size (sym->n_value) as an addend. The relocate_section
452 function will be adding in the final value of the symbol. We
453 need to subtract out the current size in order to get the
454 correct result. */
455 BFD_ASSERT (h != NULL);
456 }
457
458 if (howto->pc_relative)
459 {
460 *addendp -= 4;
461
462 /* If the symbol is defined, then the generic code is going to
463 add back the symbol value in order to cancel out an
464 adjustment it made to the addend. However, we set the addend
465 to 0 at the start of this function. We need to adjust here,
466 to avoid the adjustment the generic code will make. FIXME:
467 This is getting a bit hackish. */
468 if (sym != NULL && sym->n_scnum != 0)
469 *addendp -= sym->n_value;
470 }
471
472 if (rel->r_type == R_SH_IMAGEBASE)
473 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
474
475 return howto;
476 }
477
478 #endif /* COFF_WITH_PE */
479
480 /* This structure is used to map BFD reloc codes to SH PE relocs. */
481 struct shcoff_reloc_map
482 {
483 bfd_reloc_code_real_type bfd_reloc_val;
484 unsigned char shcoff_reloc_val;
485 };
486
487 #ifdef COFF_WITH_PE
488 /* An array mapping BFD reloc codes to SH PE relocs. */
489 static const struct shcoff_reloc_map sh_reloc_map[] =
490 {
491 { BFD_RELOC_32, R_SH_IMM32CE },
492 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
493 { BFD_RELOC_CTOR, R_SH_IMM32CE },
494 };
495 #else
496 /* An array mapping BFD reloc codes to SH PE relocs. */
497 static const struct shcoff_reloc_map sh_reloc_map[] =
498 {
499 { BFD_RELOC_32, R_SH_IMM32 },
500 { BFD_RELOC_CTOR, R_SH_IMM32 },
501 };
502 #endif
503
504 /* Given a BFD reloc code, return the howto structure for the
505 corresponding SH PE reloc. */
506 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
507 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
508
509 static reloc_howto_type *
510 sh_coff_reloc_type_lookup (abfd, code)
511 bfd * abfd ATTRIBUTE_UNUSED;
512 bfd_reloc_code_real_type code;
513 {
514 unsigned int i;
515
516 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
517 if (sh_reloc_map[i].bfd_reloc_val == code)
518 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
519
520 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
521 return NULL;
522 }
523
524 static reloc_howto_type *
525 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
526 const char *r_name)
527 {
528 unsigned int i;
529
530 for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
531 if (sh_coff_howtos[i].name != NULL
532 && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
533 return &sh_coff_howtos[i];
534
535 return NULL;
536 }
537
538 /* This macro is used in coffcode.h to get the howto corresponding to
539 an internal reloc. */
540
541 #define RTYPE2HOWTO(relent, internal) \
542 ((relent)->howto = \
543 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
544 ? &sh_coff_howtos[(internal)->r_type] \
545 : (reloc_howto_type *) NULL))
546
547 /* This is the same as the macro in coffcode.h, except that it copies
548 r_offset into reloc_entry->addend for some relocs. */
549 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
550 { \
551 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
552 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
553 coffsym = (obj_symbols (abfd) \
554 + (cache_ptr->sym_ptr_ptr - symbols)); \
555 else if (ptr) \
556 coffsym = coff_symbol_from (abfd, ptr); \
557 if (coffsym != (coff_symbol_type *) NULL \
558 && coffsym->native->u.syment.n_scnum == 0) \
559 cache_ptr->addend = 0; \
560 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
561 && ptr->section != (asection *) NULL) \
562 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
563 else \
564 cache_ptr->addend = 0; \
565 if ((reloc).r_type == R_SH_SWITCH8 \
566 || (reloc).r_type == R_SH_SWITCH16 \
567 || (reloc).r_type == R_SH_SWITCH32 \
568 || (reloc).r_type == R_SH_USES \
569 || (reloc).r_type == R_SH_COUNT \
570 || (reloc).r_type == R_SH_ALIGN) \
571 cache_ptr->addend = (reloc).r_offset; \
572 }
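/* Note added for clarity (not part of the original sources): the reloc
   types special-cased above are the ones whose r_offset field carries
   auxiliary data rather than a normal address, so that data is kept in
   the addend: the difference between the reloc address and the label
   for the R_SH_SWITCH relocs, the PC relative offset of the register
   load for R_SH_USES, the number of uses for R_SH_COUNT, and the
   alignment power for R_SH_ALIGN.  */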
573
574 /* This is the howto function for the SH relocations. */
575
576 static bfd_reloc_status_type
577 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
578 error_message)
579 bfd *abfd;
580 arelent *reloc_entry;
581 asymbol *symbol_in;
582 PTR data;
583 asection *input_section;
584 bfd *output_bfd;
585 char **error_message ATTRIBUTE_UNUSED;
586 {
587 unsigned long insn;
588 bfd_vma sym_value;
589 unsigned short r_type;
590 bfd_vma addr = reloc_entry->address;
591 bfd_byte *hit_data = addr + (bfd_byte *) data;
592
593 r_type = reloc_entry->howto->type;
594
595 if (output_bfd != NULL)
596 {
597 /* Partial linking--do nothing. */
598 reloc_entry->address += input_section->output_offset;
599 return bfd_reloc_ok;
600 }
601
602 /* Almost all relocs have to do with relaxing. If any work must be
603 done for them, it has been done in sh_relax_section. */
604 if (r_type != R_SH_IMM32
605 #ifdef COFF_WITH_PE
606 && r_type != R_SH_IMM32CE
607 && r_type != R_SH_IMAGEBASE
608 #endif
609 && (r_type != R_SH_PCDISP
610 || (symbol_in->flags & BSF_LOCAL) != 0))
611 return bfd_reloc_ok;
612
613 if (symbol_in != NULL
614 && bfd_is_und_section (symbol_in->section))
615 return bfd_reloc_undefined;
616
617 sym_value = get_symbol_value (symbol_in);
618
619 switch (r_type)
620 {
621 case R_SH_IMM32:
622 #ifdef COFF_WITH_PE
623 case R_SH_IMM32CE:
624 #endif
625 insn = bfd_get_32 (abfd, hit_data);
626 insn += sym_value + reloc_entry->addend;
627 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
628 break;
629 #ifdef COFF_WITH_PE
630 case R_SH_IMAGEBASE:
631 insn = bfd_get_32 (abfd, hit_data);
632 insn += sym_value + reloc_entry->addend;
633 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
634 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
635 break;
636 #endif
637 case R_SH_PCDISP:
638 insn = bfd_get_16 (abfd, hit_data);
639 sym_value += reloc_entry->addend;
640 sym_value -= (input_section->output_section->vma
641 + input_section->output_offset
642 + addr
643 + 4);
644 sym_value += (insn & 0xfff) << 1;
645 if (insn & 0x800)
646 sym_value -= 0x1000;
647 insn = (insn & 0xf000) | (sym_value & 0xfff);
648 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
649 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
650 return bfd_reloc_overflow;
651 break;
652 default:
653 abort ();
654 break;
655 }
656
657 return bfd_reloc_ok;
658 }
659
660 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
661
662 /* We can do relaxing. */
663 #define coff_bfd_relax_section sh_relax_section
664
665 /* We use the special COFF backend linker. */
666 #define coff_relocate_section sh_relocate_section
667
668 /* When relaxing, we need to use special code to get the relocated
669 section contents. */
670 #define coff_bfd_get_relocated_section_contents \
671 sh_coff_get_relocated_section_contents
672
673 #include "coffcode.h"
674 \f
675 /* This function handles relaxing on the SH.
676
677 Function calls on the SH look like this:
678
679 movl L1,r0
680 ...
681 jsr @r0
682 ...
683 L1:
684 .long function
685
686 The compiler and assembler will cooperate to create R_SH_USES
687 relocs on the jsr instructions. The r_offset field of the
688 R_SH_USES reloc is the PC relative offset to the instruction which
689 loads the register (the r_offset field is computed as though it
690 were a jump instruction, so the offset value is actually from four
691 bytes past the instruction). The linker can use this reloc to
692 determine just which function is being called, and thus decide
693 whether it is possible to replace the jsr with a bsr.
694
695 If multiple function calls are all based on a single register load
696 (i.e., the same function is called multiple times), the compiler
697 guarantees that each function call will have an R_SH_USES reloc.
698 Therefore, if the linker is able to convert each R_SH_USES reloc
699 which refers to that address, it can safely eliminate the register
700 load.
701
702 When the assembler creates an R_SH_USES reloc, it examines it to
703 determine which address is being loaded (L1 in the above example).
704 It then counts the number of references to that address, and
705 creates an R_SH_COUNT reloc at that address. The r_offset field of
706 the R_SH_COUNT reloc will be the number of references. If the
707 linker is able to eliminate a register load, it can use the
708 R_SH_COUNT reloc to see whether it can also eliminate the function
709 address.
710
711 SH relaxing also handles another, unrelated, matter. On the SH, if
712 a load or store instruction is not aligned on a four byte boundary,
713 the memory cycle interferes with the 32 bit instruction fetch,
714 causing a one cycle bubble in the pipeline. Therefore, we try to
715 align load and store instructions on four byte boundaries if we
716 can, by swapping them with one of the adjacent instructions. */
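/* Added sketch (not taken from the original comment): after a successful
   relaxation the call sequence above becomes roughly

	bsr	function
	...

   with the mov.l instruction deleted, and the .long constant-pool entry
   is deleted as well once the R_SH_COUNT use count for L1 drops to
   zero.  */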
717
718 static bfd_boolean
719 sh_relax_section (abfd, sec, link_info, again)
720 bfd *abfd;
721 asection *sec;
722 struct bfd_link_info *link_info;
723 bfd_boolean *again;
724 {
725 struct internal_reloc *internal_relocs;
726 bfd_boolean have_code;
727 struct internal_reloc *irel, *irelend;
728 bfd_byte *contents = NULL;
729
730 *again = FALSE;
731
732 if (link_info->relocatable
733 || (sec->flags & SEC_RELOC) == 0
734 || sec->reloc_count == 0)
735 return TRUE;
736
737 if (coff_section_data (abfd, sec) == NULL)
738 {
739 bfd_size_type amt = sizeof (struct coff_section_tdata);
740 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
741 if (sec->used_by_bfd == NULL)
742 return FALSE;
743 }
744
745 internal_relocs = (_bfd_coff_read_internal_relocs
746 (abfd, sec, link_info->keep_memory,
747 (bfd_byte *) NULL, FALSE,
748 (struct internal_reloc *) NULL));
749 if (internal_relocs == NULL)
750 goto error_return;
751
752 have_code = FALSE;
753
754 irelend = internal_relocs + sec->reloc_count;
755 for (irel = internal_relocs; irel < irelend; irel++)
756 {
757 bfd_vma laddr, paddr, symval;
758 unsigned short insn;
759 struct internal_reloc *irelfn, *irelscan, *irelcount;
760 struct internal_syment sym;
761 bfd_signed_vma foff;
762
763 if (irel->r_type == R_SH_CODE)
764 have_code = TRUE;
765
766 if (irel->r_type != R_SH_USES)
767 continue;
768
769 /* Get the section contents. */
770 if (contents == NULL)
771 {
772 if (coff_section_data (abfd, sec)->contents != NULL)
773 contents = coff_section_data (abfd, sec)->contents;
774 else
775 {
776 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
777 goto error_return;
778 }
779 }
780
781 /* The r_offset field of the R_SH_USES reloc will point us to
782 the register load. The 4 is because the r_offset field is
 783 	 computed as though it were a jump offset, which is based
784 from 4 bytes after the jump instruction. */
785 laddr = irel->r_vaddr - sec->vma + 4;
786 /* Careful to sign extend the 32-bit offset. */
787 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
788 if (laddr >= sec->size)
789 {
790 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
791 abfd, (unsigned long) irel->r_vaddr);
792 continue;
793 }
794 insn = bfd_get_16 (abfd, contents + laddr);
795
796 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
797 if ((insn & 0xf000) != 0xd000)
798 {
799 ((*_bfd_error_handler)
800 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
801 abfd, (unsigned long) irel->r_vaddr, insn));
802 continue;
803 }
804
805 /* Get the address from which the register is being loaded. The
806 displacement in the mov.l instruction is quadrupled. It is a
807 displacement from four bytes after the movl instruction, but,
808 before adding in the PC address, two least significant bits
809 of the PC are cleared. We assume that the section is aligned
810 on a four byte boundary. */
811 paddr = insn & 0xff;
812 paddr *= 4;
813 paddr += (laddr + 4) &~ (bfd_vma) 3;
814 if (paddr >= sec->size)
815 {
816 ((*_bfd_error_handler)
817 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
818 abfd, (unsigned long) irel->r_vaddr));
819 continue;
820 }
821
822 /* Get the reloc for the address from which the register is
823 being loaded. This reloc will tell us which function is
824 actually being called. */
825 paddr += sec->vma;
826 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
827 if (irelfn->r_vaddr == paddr
828 #ifdef COFF_WITH_PE
829 && (irelfn->r_type == R_SH_IMM32
830 || irelfn->r_type == R_SH_IMM32CE
831 || irelfn->r_type == R_SH_IMAGEBASE)
832
833 #else
834 && irelfn->r_type == R_SH_IMM32
835 #endif
836 )
837 break;
838 if (irelfn >= irelend)
839 {
840 ((*_bfd_error_handler)
841 ("%B: 0x%lx: warning: could not find expected reloc",
842 abfd, (unsigned long) paddr));
843 continue;
844 }
845
846 /* Get the value of the symbol referred to by the reloc. */
847 if (! _bfd_coff_get_external_symbols (abfd))
848 goto error_return;
849 bfd_coff_swap_sym_in (abfd,
850 ((bfd_byte *) obj_coff_external_syms (abfd)
851 + (irelfn->r_symndx
852 * bfd_coff_symesz (abfd))),
853 &sym);
854 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
855 {
856 ((*_bfd_error_handler)
857 ("%B: 0x%lx: warning: symbol in unexpected section",
858 abfd, (unsigned long) paddr));
859 continue;
860 }
861
862 if (sym.n_sclass != C_EXT)
863 {
864 symval = (sym.n_value
865 - sec->vma
866 + sec->output_section->vma
867 + sec->output_offset);
868 }
869 else
870 {
871 struct coff_link_hash_entry *h;
872
873 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
874 BFD_ASSERT (h != NULL);
875 if (h->root.type != bfd_link_hash_defined
876 && h->root.type != bfd_link_hash_defweak)
877 {
878 /* This appears to be a reference to an undefined
879 symbol. Just ignore it--it will be caught by the
880 regular reloc processing. */
881 continue;
882 }
883
884 symval = (h->root.u.def.value
885 + h->root.u.def.section->output_section->vma
886 + h->root.u.def.section->output_offset);
887 }
888
889 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
890
891 /* See if this function call can be shortened. */
892 foff = (symval
893 - (irel->r_vaddr
894 - sec->vma
895 + sec->output_section->vma
896 + sec->output_offset
897 + 4));
898 if (foff < -0x1000 || foff >= 0x1000)
899 {
900 /* After all that work, we can't shorten this function call. */
901 continue;
902 }
903
904 /* Shorten the function call. */
905
906 /* For simplicity of coding, we are going to modify the section
907 contents, the section relocs, and the BFD symbol table. We
908 must tell the rest of the code not to free up this
909 information. It would be possible to instead create a table
910 of changes which have to be made, as is done in coff-mips.c;
911 that would be more work, but would require less memory when
912 the linker is run. */
913
914 coff_section_data (abfd, sec)->relocs = internal_relocs;
915 coff_section_data (abfd, sec)->keep_relocs = TRUE;
916
917 coff_section_data (abfd, sec)->contents = contents;
918 coff_section_data (abfd, sec)->keep_contents = TRUE;
919
920 obj_coff_keep_syms (abfd) = TRUE;
921
922 /* Replace the jsr with a bsr. */
923
924 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
925 replace the jsr with a bsr. */
926 irel->r_type = R_SH_PCDISP;
927 irel->r_symndx = irelfn->r_symndx;
928 if (sym.n_sclass != C_EXT)
929 {
930 /* If this needs to be changed because of future relaxing,
931 it will be handled here like other internal PCDISP
932 relocs. */
933 bfd_put_16 (abfd,
934 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
935 contents + irel->r_vaddr - sec->vma);
936 }
937 else
938 {
939 /* We can't fully resolve this yet, because the external
940 symbol value may be changed by future relaxing. We let
941 the final link phase handle it. */
942 bfd_put_16 (abfd, (bfd_vma) 0xb000,
943 contents + irel->r_vaddr - sec->vma);
944 }
945
946 /* See if there is another R_SH_USES reloc referring to the same
947 register load. */
948 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
949 if (irelscan->r_type == R_SH_USES
950 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
951 break;
952 if (irelscan < irelend)
953 {
954 /* Some other function call depends upon this register load,
955 and we have not yet converted that function call.
956 Indeed, we may never be able to convert it. There is
957 nothing else we can do at this point. */
958 continue;
959 }
960
 961       /* Look for an R_SH_COUNT reloc on the location where the
962 function address is stored. Do this before deleting any
963 bytes, to avoid confusion about the address. */
964 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
965 if (irelcount->r_vaddr == paddr
966 && irelcount->r_type == R_SH_COUNT)
967 break;
968
969 /* Delete the register load. */
970 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
971 goto error_return;
972
973 /* That will change things, so, just in case it permits some
974 other function call to come within range, we should relax
975 again. Note that this is not required, and it may be slow. */
976 *again = TRUE;
977
978 /* Now check whether we got a COUNT reloc. */
979 if (irelcount >= irelend)
980 {
981 ((*_bfd_error_handler)
982 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
983 abfd, (unsigned long) paddr));
984 continue;
985 }
986
987 /* The number of uses is stored in the r_offset field. We've
988 just deleted one. */
989 if (irelcount->r_offset == 0)
990 {
991 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
992 abfd, (unsigned long) paddr));
993 continue;
994 }
995
996 --irelcount->r_offset;
997
998 /* If there are no more uses, we can delete the address. Reload
999 the address from irelfn, in case it was changed by the
1000 previous call to sh_relax_delete_bytes. */
1001 if (irelcount->r_offset == 0)
1002 {
1003 if (! sh_relax_delete_bytes (abfd, sec,
1004 irelfn->r_vaddr - sec->vma, 4))
1005 goto error_return;
1006 }
1007
1008 /* We've done all we can with that function call. */
1009 }
1010
1011 /* Look for load and store instructions that we can align on four
1012 byte boundaries. */
1013 if (have_code)
1014 {
1015 bfd_boolean swapped;
1016
1017 /* Get the section contents. */
1018 if (contents == NULL)
1019 {
1020 if (coff_section_data (abfd, sec)->contents != NULL)
1021 contents = coff_section_data (abfd, sec)->contents;
1022 else
1023 {
1024 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1025 goto error_return;
1026 }
1027 }
1028
1029 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1030 goto error_return;
1031
1032 if (swapped)
1033 {
1034 coff_section_data (abfd, sec)->relocs = internal_relocs;
1035 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1036
1037 coff_section_data (abfd, sec)->contents = contents;
1038 coff_section_data (abfd, sec)->keep_contents = TRUE;
1039
1040 obj_coff_keep_syms (abfd) = TRUE;
1041 }
1042 }
1043
1044 if (internal_relocs != NULL
1045 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1046 {
1047 if (! link_info->keep_memory)
1048 free (internal_relocs);
1049 else
1050 coff_section_data (abfd, sec)->relocs = internal_relocs;
1051 }
1052
1053 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1054 {
1055 if (! link_info->keep_memory)
1056 free (contents);
1057 else
1058 /* Cache the section contents for coff_link_input_bfd. */
1059 coff_section_data (abfd, sec)->contents = contents;
1060 }
1061
1062 return TRUE;
1063
1064 error_return:
1065 if (internal_relocs != NULL
1066 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1067 free (internal_relocs);
1068 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1069 free (contents);
1070 return FALSE;
1071 }
1072
1073 /* Delete some bytes from a section while relaxing. */
1074
1075 static bfd_boolean
1076 sh_relax_delete_bytes (abfd, sec, addr, count)
1077 bfd *abfd;
1078 asection *sec;
1079 bfd_vma addr;
1080 int count;
1081 {
1082 bfd_byte *contents;
1083 struct internal_reloc *irel, *irelend;
1084 struct internal_reloc *irelalign;
1085 bfd_vma toaddr;
1086 bfd_byte *esym, *esymend;
1087 bfd_size_type symesz;
1088 struct coff_link_hash_entry **sym_hash;
1089 asection *o;
1090
1091 contents = coff_section_data (abfd, sec)->contents;
1092
1093   /* The deletion must stop at the next ALIGN reloc for an alignment
1094 power larger than the number of bytes we are deleting. */
1095
1096 irelalign = NULL;
1097 toaddr = sec->size;
1098
1099 irel = coff_section_data (abfd, sec)->relocs;
1100 irelend = irel + sec->reloc_count;
1101 for (; irel < irelend; irel++)
1102 {
1103 if (irel->r_type == R_SH_ALIGN
1104 && irel->r_vaddr - sec->vma > addr
1105 && count < (1 << irel->r_offset))
1106 {
1107 irelalign = irel;
1108 toaddr = irel->r_vaddr - sec->vma;
1109 break;
1110 }
1111 }
1112
1113 /* Actually delete the bytes. */
1114 memmove (contents + addr, contents + addr + count,
1115 (size_t) (toaddr - addr - count));
1116 if (irelalign == NULL)
1117 sec->size -= count;
1118 else
1119 {
1120 int i;
1121
1122 #define NOP_OPCODE (0x0009)
1123
1124 BFD_ASSERT ((count & 1) == 0);
1125 for (i = 0; i < count; i += 2)
1126 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1127 }
1128
1129 /* Adjust all the relocs. */
1130 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1131 {
1132 bfd_vma nraddr, stop;
1133 bfd_vma start = 0;
1134 int insn = 0;
1135 struct internal_syment sym;
1136 int off, adjust, oinsn;
1137 bfd_signed_vma voff = 0;
1138 bfd_boolean overflow;
1139
1140 /* Get the new reloc address. */
1141 nraddr = irel->r_vaddr - sec->vma;
1142 if ((irel->r_vaddr - sec->vma > addr
1143 && irel->r_vaddr - sec->vma < toaddr)
1144 || (irel->r_type == R_SH_ALIGN
1145 && irel->r_vaddr - sec->vma == toaddr))
1146 nraddr -= count;
1147
1148 /* See if this reloc was for the bytes we have deleted, in which
1149 case we no longer care about it. Don't delete relocs which
1150 represent addresses, though. */
1151 if (irel->r_vaddr - sec->vma >= addr
1152 && irel->r_vaddr - sec->vma < addr + count
1153 && irel->r_type != R_SH_ALIGN
1154 && irel->r_type != R_SH_CODE
1155 && irel->r_type != R_SH_DATA
1156 && irel->r_type != R_SH_LABEL)
1157 irel->r_type = R_SH_UNUSED;
1158
1159 /* If this is a PC relative reloc, see if the range it covers
1160 includes the bytes we have deleted. */
1161 switch (irel->r_type)
1162 {
1163 default:
1164 break;
1165
1166 case R_SH_PCDISP8BY2:
1167 case R_SH_PCDISP:
1168 case R_SH_PCRELIMM8BY2:
1169 case R_SH_PCRELIMM8BY4:
1170 start = irel->r_vaddr - sec->vma;
1171 insn = bfd_get_16 (abfd, contents + nraddr);
1172 break;
1173 }
1174
1175 switch (irel->r_type)
1176 {
1177 default:
1178 start = stop = addr;
1179 break;
1180
1181 case R_SH_IMM32:
1182 #ifdef COFF_WITH_PE
1183 case R_SH_IMM32CE:
1184 case R_SH_IMAGEBASE:
1185 #endif
1186 /* If this reloc is against a symbol defined in this
1187 section, and the symbol will not be adjusted below, we
1188 	     must check the addend to see whether it will put the value in
1189 range to be adjusted, and hence must be changed. */
1190 bfd_coff_swap_sym_in (abfd,
1191 ((bfd_byte *) obj_coff_external_syms (abfd)
1192 + (irel->r_symndx
1193 * bfd_coff_symesz (abfd))),
1194 &sym);
1195 if (sym.n_sclass != C_EXT
1196 && sym.n_scnum == sec->target_index
1197 && ((bfd_vma) sym.n_value <= addr
1198 || (bfd_vma) sym.n_value >= toaddr))
1199 {
1200 bfd_vma val;
1201
1202 val = bfd_get_32 (abfd, contents + nraddr);
1203 val += sym.n_value;
1204 if (val > addr && val < toaddr)
1205 bfd_put_32 (abfd, val - count, contents + nraddr);
1206 }
1207 start = stop = addr;
1208 break;
1209
1210 case R_SH_PCDISP8BY2:
1211 off = insn & 0xff;
1212 if (off & 0x80)
1213 off -= 0x100;
1214 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1215 break;
1216
1217 case R_SH_PCDISP:
1218 bfd_coff_swap_sym_in (abfd,
1219 ((bfd_byte *) obj_coff_external_syms (abfd)
1220 + (irel->r_symndx
1221 * bfd_coff_symesz (abfd))),
1222 &sym);
1223 if (sym.n_sclass == C_EXT)
1224 start = stop = addr;
1225 else
1226 {
1227 off = insn & 0xfff;
1228 if (off & 0x800)
1229 off -= 0x1000;
1230 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1231 }
1232 break;
1233
1234 case R_SH_PCRELIMM8BY2:
1235 off = insn & 0xff;
1236 stop = start + 4 + off * 2;
1237 break;
1238
1239 case R_SH_PCRELIMM8BY4:
1240 off = insn & 0xff;
1241 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1242 break;
1243
1244 case R_SH_SWITCH8:
1245 case R_SH_SWITCH16:
1246 case R_SH_SWITCH32:
1247 	  /* These reloc types represent
1248 .word L2-L1
1249 The r_offset field holds the difference between the reloc
1250 address and L1. That is the start of the reloc, and
1251 adding in the contents gives us the top. We must adjust
1252 both the r_offset field and the section contents. */
1253
1254 start = irel->r_vaddr - sec->vma;
1255 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1256
1257 if (start > addr
1258 && start < toaddr
1259 && (stop <= addr || stop >= toaddr))
1260 irel->r_offset += count;
1261 else if (stop > addr
1262 && stop < toaddr
1263 && (start <= addr || start >= toaddr))
1264 irel->r_offset -= count;
1265
1266 start = stop;
1267
1268 if (irel->r_type == R_SH_SWITCH16)
1269 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1270 else if (irel->r_type == R_SH_SWITCH8)
1271 voff = bfd_get_8 (abfd, contents + nraddr);
1272 else
1273 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1274 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1275
1276 break;
1277
1278 case R_SH_USES:
1279 start = irel->r_vaddr - sec->vma;
1280 stop = (bfd_vma) ((bfd_signed_vma) start
1281 + (long) irel->r_offset
1282 + 4);
1283 break;
1284 }
1285
1286 if (start > addr
1287 && start < toaddr
1288 && (stop <= addr || stop >= toaddr))
1289 adjust = count;
1290 else if (stop > addr
1291 && stop < toaddr
1292 && (start <= addr || start >= toaddr))
1293 adjust = - count;
1294 else
1295 adjust = 0;
1296
1297 if (adjust != 0)
1298 {
1299 oinsn = insn;
1300 overflow = FALSE;
1301 switch (irel->r_type)
1302 {
1303 default:
1304 abort ();
1305 break;
1306
1307 case R_SH_PCDISP8BY2:
1308 case R_SH_PCRELIMM8BY2:
1309 insn += adjust / 2;
1310 if ((oinsn & 0xff00) != (insn & 0xff00))
1311 overflow = TRUE;
1312 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1313 break;
1314
1315 case R_SH_PCDISP:
1316 insn += adjust / 2;
1317 if ((oinsn & 0xf000) != (insn & 0xf000))
1318 overflow = TRUE;
1319 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1320 break;
1321
1322 case R_SH_PCRELIMM8BY4:
1323 BFD_ASSERT (adjust == count || count >= 4);
1324 if (count >= 4)
1325 insn += adjust / 4;
1326 else
1327 {
1328 if ((irel->r_vaddr & 3) == 0)
1329 ++insn;
1330 }
1331 if ((oinsn & 0xff00) != (insn & 0xff00))
1332 overflow = TRUE;
1333 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1334 break;
1335
1336 case R_SH_SWITCH8:
1337 voff += adjust;
1338 if (voff < 0 || voff >= 0xff)
1339 overflow = TRUE;
1340 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1341 break;
1342
1343 case R_SH_SWITCH16:
1344 voff += adjust;
1345 if (voff < - 0x8000 || voff >= 0x8000)
1346 overflow = TRUE;
1347 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1348 break;
1349
1350 case R_SH_SWITCH32:
1351 voff += adjust;
1352 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1353 break;
1354
1355 case R_SH_USES:
1356 irel->r_offset += adjust;
1357 break;
1358 }
1359
1360 if (overflow)
1361 {
1362 ((*_bfd_error_handler)
1363 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1364 abfd, (unsigned long) irel->r_vaddr));
1365 bfd_set_error (bfd_error_bad_value);
1366 return FALSE;
1367 }
1368 }
1369
1370 irel->r_vaddr = nraddr + sec->vma;
1371 }
1372
1373   /* Look through all the other sections.  If they contain any IMM32
1374 relocs against internal symbols which we are not going to adjust
1375 below, we may need to adjust the addends. */
1376 for (o = abfd->sections; o != NULL; o = o->next)
1377 {
1378 struct internal_reloc *internal_relocs;
1379 struct internal_reloc *irelscan, *irelscanend;
1380 bfd_byte *ocontents;
1381
1382 if (o == sec
1383 || (o->flags & SEC_RELOC) == 0
1384 || o->reloc_count == 0)
1385 continue;
1386
1387 /* We always cache the relocs. Perhaps, if info->keep_memory is
1388 FALSE, we should free them, if we are permitted to, when we
1389 	 leave sh_relax_section. */
1390 internal_relocs = (_bfd_coff_read_internal_relocs
1391 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1392 (struct internal_reloc *) NULL));
1393 if (internal_relocs == NULL)
1394 return FALSE;
1395
1396 ocontents = NULL;
1397 irelscanend = internal_relocs + o->reloc_count;
1398 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1399 {
1400 struct internal_syment sym;
1401
1402 #ifdef COFF_WITH_PE
1403 if (irelscan->r_type != R_SH_IMM32
1404 && irelscan->r_type != R_SH_IMAGEBASE
1405 && irelscan->r_type != R_SH_IMM32CE)
1406 #else
1407 if (irelscan->r_type != R_SH_IMM32)
1408 #endif
1409 continue;
1410
1411 bfd_coff_swap_sym_in (abfd,
1412 ((bfd_byte *) obj_coff_external_syms (abfd)
1413 + (irelscan->r_symndx
1414 * bfd_coff_symesz (abfd))),
1415 &sym);
1416 if (sym.n_sclass != C_EXT
1417 && sym.n_scnum == sec->target_index
1418 && ((bfd_vma) sym.n_value <= addr
1419 || (bfd_vma) sym.n_value >= toaddr))
1420 {
1421 bfd_vma val;
1422
1423 if (ocontents == NULL)
1424 {
1425 if (coff_section_data (abfd, o)->contents != NULL)
1426 ocontents = coff_section_data (abfd, o)->contents;
1427 else
1428 {
1429 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1430 return FALSE;
1431 /* We always cache the section contents.
1432 Perhaps, if info->keep_memory is FALSE, we
1433 should free them, if we are permitted to,
1434 			 when we leave sh_relax_section. */
1435 coff_section_data (abfd, o)->contents = ocontents;
1436 }
1437 }
1438
1439 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1440 val += sym.n_value;
1441 if (val > addr && val < toaddr)
1442 bfd_put_32 (abfd, val - count,
1443 ocontents + irelscan->r_vaddr - o->vma);
1444
1445 coff_section_data (abfd, o)->keep_contents = TRUE;
1446 }
1447 }
1448 }
1449
1450 /* Adjusting the internal symbols will not work if something has
1451 already retrieved the generic symbols. It would be possible to
1452 make this work by adjusting the generic symbols at the same time.
1453 However, this case should not arise in normal usage. */
1454 if (obj_symbols (abfd) != NULL
1455 || obj_raw_syments (abfd) != NULL)
1456 {
1457 ((*_bfd_error_handler)
1458 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1459 bfd_set_error (bfd_error_invalid_operation);
1460 return FALSE;
1461 }
1462
1463 /* Adjust all the symbols. */
1464 sym_hash = obj_coff_sym_hashes (abfd);
1465 symesz = bfd_coff_symesz (abfd);
1466 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1467 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1468 while (esym < esymend)
1469 {
1470 struct internal_syment isym;
1471
1472 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1473
1474 if (isym.n_scnum == sec->target_index
1475 && (bfd_vma) isym.n_value > addr
1476 && (bfd_vma) isym.n_value < toaddr)
1477 {
1478 isym.n_value -= count;
1479
1480 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1481
1482 if (*sym_hash != NULL)
1483 {
1484 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1485 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1486 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1487 && (*sym_hash)->root.u.def.value < toaddr);
1488 (*sym_hash)->root.u.def.value -= count;
1489 }
1490 }
1491
1492 esym += (isym.n_numaux + 1) * symesz;
1493 sym_hash += isym.n_numaux + 1;
1494 }
1495
1496 /* See if we can move the ALIGN reloc forward. We have adjusted
1497 r_vaddr for it already. */
1498 if (irelalign != NULL)
1499 {
1500 bfd_vma alignto, alignaddr;
1501
1502 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1503 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1504 1 << irelalign->r_offset);
1505 if (alignto != alignaddr)
1506 {
1507 /* Tail recursion. */
1508 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1509 (int) (alignto - alignaddr));
1510 }
1511 }
1512
1513 return TRUE;
1514 }
1515 \f
1516 /* This is yet another version of the SH opcode table, used to rapidly
1517 get information about a particular instruction. */
1518
1519 /* The opcode map is represented by an array of these structures. The
1520 array is indexed by the high order four bits in the instruction. */
1521
1522 struct sh_major_opcode
1523 {
1524 /* A pointer to the instruction list. This is an array which
1525 contains all the instructions with this major opcode. */
1526 const struct sh_minor_opcode *minor_opcodes;
1527 /* The number of elements in minor_opcodes. */
1528 unsigned short count;
1529 };
1530
1531 /* This structure holds information for a set of SH opcodes. The
1532 instruction code is anded with the mask value, and the resulting
1533    value is used to search the sorted opcode list. */
1534
1535 struct sh_minor_opcode
1536 {
1537 /* The sorted opcode list. */
1538 const struct sh_opcode *opcodes;
1539 /* The number of elements in opcodes. */
1540 unsigned short count;
1541 /* The mask value to use when searching the opcode list. */
1542 unsigned short mask;
1543 };
1544
1545 /* This structure holds information for an SH instruction. An array
1546 of these structures is sorted in order by opcode. */
1547
1548 struct sh_opcode
1549 {
1550 /* The code for this instruction, after it has been anded with the
1551      mask value in the sh_minor_opcode structure. */
1552 unsigned short opcode;
1553 /* Flags for this instruction. */
1554 unsigned long flags;
1555 };
1556
1557 /* Flags which appear in the sh_opcode structure. */
1558
1559 /* This instruction loads a value from memory. */
1560 #define LOAD (0x1)
1561
1562 /* This instruction stores a value to memory. */
1563 #define STORE (0x2)
1564
1565 /* This instruction is a branch. */
1566 #define BRANCH (0x4)
1567
1568 /* This instruction has a delay slot. */
1569 #define DELAY (0x8)
1570
1571 /* This instruction uses the value in the register in the field at
1572 mask 0x0f00 of the instruction. */
1573 #define USES1 (0x10)
1574 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1575
1576 /* This instruction uses the value in the register in the field at
1577 mask 0x00f0 of the instruction. */
1578 #define USES2 (0x20)
1579 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1580
1581 /* This instruction uses the value in register 0. */
1582 #define USESR0 (0x40)
1583
1584 /* This instruction sets the value in the register in the field at
1585 mask 0x0f00 of the instruction. */
1586 #define SETS1 (0x80)
1587 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1588
1589 /* This instruction sets the value in the register in the field at
1590 mask 0x00f0 of the instruction. */
1591 #define SETS2 (0x100)
1592 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1593
1594 /* This instruction sets register 0. */
1595 #define SETSR0 (0x200)
1596
1597 /* This instruction sets a special register. */
1598 #define SETSSP (0x400)
1599
1600 /* This instruction uses a special register. */
1601 #define USESSP (0x800)
1602
1603 /* This instruction uses the floating point register in the field at
1604 mask 0x0f00 of the instruction. */
1605 #define USESF1 (0x1000)
1606 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1607
1608 /* This instruction uses the floating point register in the field at
1609 mask 0x00f0 of the instruction. */
1610 #define USESF2 (0x2000)
1611 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1612
1613 /* This instruction uses floating point register 0. */
1614 #define USESF0 (0x4000)
1615
1616 /* This instruction sets the floating point register in the field at
1617 mask 0x0f00 of the instruction. */
1618 #define SETSF1 (0x8000)
1619 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1620
1621 #define USESAS (0x10000)
1622 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1623 #define USESR8 (0x20000)
1624 #define SETSAS (0x40000)
1625 #define SETSAS_REG(x) USESAS_REG (x)
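/* Worked example (an added illustration, not from the original sources):
   "jsr @r11" encodes as 0x4b0b.  Its entry in sh_opcode40 below is
   { 0x400b, BRANCH | DELAY | USES1 }, and USES1_REG (0x4b0b) == 11,
   so the tables record that the instruction is a delayed branch which
   uses r11.  */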
1626
1627 #define MAP(a) a, sizeof a / sizeof a[0]
1628
1629 #ifndef COFF_IMAGE_WITH_PE
1630 static bfd_boolean sh_insn_uses_reg
1631 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1632 static bfd_boolean sh_insn_sets_reg
1633 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1634 static bfd_boolean sh_insn_uses_or_sets_reg
1635 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1636 static bfd_boolean sh_insn_uses_freg
1637 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1638 static bfd_boolean sh_insn_sets_freg
1639 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1640 static bfd_boolean sh_insn_uses_or_sets_freg
1641 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1642 static bfd_boolean sh_insns_conflict
1643 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1644 const struct sh_opcode *));
1645 static bfd_boolean sh_load_use
1646 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1647 const struct sh_opcode *));
1648
1649 /* The opcode maps. */
1650
1651 static const struct sh_opcode sh_opcode00[] =
1652 {
1653 { 0x0008, SETSSP }, /* clrt */
1654 { 0x0009, 0 }, /* nop */
1655 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1656 { 0x0018, SETSSP }, /* sett */
1657 { 0x0019, SETSSP }, /* div0u */
1658 { 0x001b, 0 }, /* sleep */
1659 { 0x0028, SETSSP }, /* clrmac */
1660 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1661 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1662 { 0x0048, SETSSP }, /* clrs */
1663 { 0x0058, SETSSP } /* sets */
1664 };
1665
1666 static const struct sh_opcode sh_opcode01[] =
1667 {
1668 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1669 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1670 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1671 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1672 { 0x0029, SETS1 | USESSP }, /* movt rn */
1673 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1674 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1675 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1676 { 0x0083, LOAD | USES1 }, /* pref @rn */
1677 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1678 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1679 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1680 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1681 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1682 };
1683
1684 static const struct sh_opcode sh_opcode02[] =
1685 {
1686 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1687 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1688 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1689 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1690 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1691 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1692 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1693 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1694 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1695 };
1696
1697 static const struct sh_minor_opcode sh_opcode0[] =
1698 {
1699 { MAP (sh_opcode00), 0xffff },
1700 { MAP (sh_opcode01), 0xf0ff },
1701 { MAP (sh_opcode02), 0xf00f }
1702 };
1703
1704 static const struct sh_opcode sh_opcode10[] =
1705 {
1706 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1707 };
1708
1709 static const struct sh_minor_opcode sh_opcode1[] =
1710 {
1711 { MAP (sh_opcode10), 0xf000 }
1712 };
1713
1714 static const struct sh_opcode sh_opcode20[] =
1715 {
1716 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1717 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1718 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1719 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1720 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1721 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1722 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1723 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1724 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1725 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1726 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1727 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1728 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1729 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1730 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1731 };
1732
1733 static const struct sh_minor_opcode sh_opcode2[] =
1734 {
1735 { MAP (sh_opcode20), 0xf00f }
1736 };
1737
1738 static const struct sh_opcode sh_opcode30[] =
1739 {
1740 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1741 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1742 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1743 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1744 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1745 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1746 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1747 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1748 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1749 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1750 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1751 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1752 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1753 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1754 };
1755
1756 static const struct sh_minor_opcode sh_opcode3[] =
1757 {
1758 { MAP (sh_opcode30), 0xf00f }
1759 };
1760
1761 static const struct sh_opcode sh_opcode40[] =
1762 {
1763 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1764 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1765 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1766 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1767 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1768 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1769 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1770 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1771 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1772 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1773 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1774 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1775 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1776 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1777 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1778 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1779 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1780 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1781 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1782 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1783 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1784 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1785 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1786 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1787 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1788 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1789 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1790 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1791 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1792 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1793 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1794 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1795 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1796 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1797 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1798 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1799 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1800 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1801 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1802 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1803 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1804 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1805 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1806 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1807 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1808 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1809 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1810 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1811 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1812 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1813 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1814 };
1815
1816 static const struct sh_opcode sh_opcode41[] =
1817 {
1818 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1819 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1820 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1821 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1822 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1823 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1824 };
1825
1826 static const struct sh_minor_opcode sh_opcode4[] =
1827 {
1828 { MAP (sh_opcode40), 0xf0ff },
1829 { MAP (sh_opcode41), 0xf00f }
1830 };
1831
1832 static const struct sh_opcode sh_opcode50[] =
1833 {
1834 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1835 };
1836
1837 static const struct sh_minor_opcode sh_opcode5[] =
1838 {
1839 { MAP (sh_opcode50), 0xf000 }
1840 };
1841
1842 static const struct sh_opcode sh_opcode60[] =
1843 {
1844 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1845 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1846 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1847 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1848 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1849 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1850 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1851 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1852 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1853 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1854 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1855 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1856 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1857 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1858 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1859 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1860 };
1861
1862 static const struct sh_minor_opcode sh_opcode6[] =
1863 {
1864 { MAP (sh_opcode60), 0xf00f }
1865 };
1866
1867 static const struct sh_opcode sh_opcode70[] =
1868 {
1869 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1870 };
1871
1872 static const struct sh_minor_opcode sh_opcode7[] =
1873 {
1874 { MAP (sh_opcode70), 0xf000 }
1875 };
1876
1877 static const struct sh_opcode sh_opcode80[] =
1878 {
1879 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1880 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1881 { 0x8200, SETSSP }, /* setrc #imm */
1882 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1883 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1884 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1885 { 0x8900, BRANCH | USESSP }, /* bt label */
1886 { 0x8b00, BRANCH | USESSP }, /* bf label */
1887 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1888 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1889 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1890 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1891 };
1892
1893 static const struct sh_minor_opcode sh_opcode8[] =
1894 {
1895 { MAP (sh_opcode80), 0xff00 }
1896 };
1897
1898 static const struct sh_opcode sh_opcode90[] =
1899 {
1900 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1901 };
1902
1903 static const struct sh_minor_opcode sh_opcode9[] =
1904 {
1905 { MAP (sh_opcode90), 0xf000 }
1906 };
1907
1908 static const struct sh_opcode sh_opcodea0[] =
1909 {
1910 { 0xa000, BRANCH | DELAY } /* bra label */
1911 };
1912
1913 static const struct sh_minor_opcode sh_opcodea[] =
1914 {
1915 { MAP (sh_opcodea0), 0xf000 }
1916 };
1917
1918 static const struct sh_opcode sh_opcodeb0[] =
1919 {
1920 { 0xb000, BRANCH | DELAY } /* bsr label */
1921 };
1922
1923 static const struct sh_minor_opcode sh_opcodeb[] =
1924 {
1925 { MAP (sh_opcodeb0), 0xf000 }
1926 };
1927
1928 static const struct sh_opcode sh_opcodec0[] =
1929 {
1930 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1931 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1932 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1933 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1934 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1935 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1936 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1937 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1938 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1939 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1940 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1941 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1942 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1943 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1944 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1945 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1946 };
1947
1948 static const struct sh_minor_opcode sh_opcodec[] =
1949 {
1950 { MAP (sh_opcodec0), 0xff00 }
1951 };
1952
1953 static const struct sh_opcode sh_opcoded0[] =
1954 {
1955 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1956 };
1957
1958 static const struct sh_minor_opcode sh_opcoded[] =
1959 {
1960 { MAP (sh_opcoded0), 0xf000 }
1961 };
1962
1963 static const struct sh_opcode sh_opcodee0[] =
1964 {
1965 { 0xe000, SETS1 } /* mov #imm,rn */
1966 };
1967
1968 static const struct sh_minor_opcode sh_opcodee[] =
1969 {
1970 { MAP (sh_opcodee0), 0xf000 }
1971 };
1972
1973 static const struct sh_opcode sh_opcodef0[] =
1974 {
1975 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1976 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1977 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1978 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1979 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1980 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1981 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1982 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1983 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1984 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1985 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1986 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1987 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1988 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1989 };
1990
1991 static const struct sh_opcode sh_opcodef1[] =
1992 {
1993 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1994 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1995 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1996 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1997 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1998 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1999 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2000 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2001 { 0xf08d, SETSF1 }, /* fldi0 fn */
2002 { 0xf09d, SETSF1 } /* fldi1 fn */
2003 };
2004
2005 static const struct sh_minor_opcode sh_opcodef[] =
2006 {
2007 { MAP (sh_opcodef0), 0xf00f },
2008 { MAP (sh_opcodef1), 0xf0ff }
2009 };
2010
2011 static struct sh_major_opcode sh_opcodes[] =
2012 {
2013 { MAP (sh_opcode0) },
2014 { MAP (sh_opcode1) },
2015 { MAP (sh_opcode2) },
2016 { MAP (sh_opcode3) },
2017 { MAP (sh_opcode4) },
2018 { MAP (sh_opcode5) },
2019 { MAP (sh_opcode6) },
2020 { MAP (sh_opcode7) },
2021 { MAP (sh_opcode8) },
2022 { MAP (sh_opcode9) },
2023 { MAP (sh_opcodea) },
2024 { MAP (sh_opcodeb) },
2025 { MAP (sh_opcodec) },
2026 { MAP (sh_opcoded) },
2027 { MAP (sh_opcodee) },
2028 { MAP (sh_opcodef) }
2029 };
2030
2031 /* The double data transfer / parallel processing insns are not
2032 described here. This will cause sh_align_load_span to leave them alone. */
2033
2034 static const struct sh_opcode sh_dsp_opcodef0[] =
2035 {
2036 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2037 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2038 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2039 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2040 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2041 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2042 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2043 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2044 };
2045
2046 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2047 {
2048 { MAP (sh_dsp_opcodef0), 0xfc0d }
2049 };
2050
2051 /* Given an instruction, return a pointer to the corresponding
2052 sh_opcode structure. Return NULL if the instruction is not
2053 recognized. */
2054
2055 static const struct sh_opcode *
2056 sh_insn_info (insn)
2057 unsigned int insn;
2058 {
2059 const struct sh_major_opcode *maj;
2060 const struct sh_minor_opcode *min, *minend;
2061
2062 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2063 min = maj->minor_opcodes;
2064 minend = min + maj->count;
2065 for (; min < minend; min++)
2066 {
2067 unsigned int l;
2068 const struct sh_opcode *op, *opend;
2069
2070 l = insn & min->mask;
2071 op = min->opcodes;
2072 opend = op + min->count;
2073
2074 /* Since the opcodes tables are sorted, we could use a binary
2075 search here if the count were above some cutoff value. */
2076 for (; op < opend; op++)
2077 if (op->opcode == l)
2078 return op;
2079 }
2080
2081 return NULL;
2082 }
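
/* For illustration, a minimal sketch of how the two-level lookup above
   decodes a concrete instruction; the value 0x310c ("add r0,r1") is
   just an assumed example:

     unsigned int insn = 0x310c;
     const struct sh_opcode *op = sh_insn_info (insn);

   (insn & 0xf000) >> 12 is 3, so sh_opcode3 is searched.  Its single
   minor table uses the mask 0xf00f, and insn & 0xf00f is 0x300c, which
   matches the "add rm,rn" entry, so op->flags is SETS1 | USES1 | USES2.
   An unrecognized pattern falls through both loops and NULL is
   returned, which callers treat as "leave this insn alone".  */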
2083
2084 /* See whether an instruction uses or sets a general purpose register.  */
2085
2086 static bfd_boolean
2087 sh_insn_uses_or_sets_reg (insn, op, reg)
2088 unsigned int insn;
2089 const struct sh_opcode *op;
2090 unsigned int reg;
2091 {
2092 if (sh_insn_uses_reg (insn, op, reg))
2093 return TRUE;
2094
2095 return sh_insn_sets_reg (insn, op, reg);
2096 }
2097
2098 /* See whether an instruction uses a general purpose register. */
2099
2100 static bfd_boolean
2101 sh_insn_uses_reg (insn, op, reg)
2102 unsigned int insn;
2103 const struct sh_opcode *op;
2104 unsigned int reg;
2105 {
2106 unsigned int f;
2107
2108 f = op->flags;
2109
2110 if ((f & USES1) != 0
2111 && USES1_REG (insn) == reg)
2112 return TRUE;
2113 if ((f & USES2) != 0
2114 && USES2_REG (insn) == reg)
2115 return TRUE;
2116 if ((f & USESR0) != 0
2117 && reg == 0)
2118 return TRUE;
2119 if ((f & USESAS) && reg == USESAS_REG (insn))
2120 return TRUE;
2121 if ((f & USESR8) && reg == 8)
2122 return TRUE;
2123
2124 return FALSE;
2125 }
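
/* As a concrete example of the tests above: for insn 0x321c
   ("add r1,r2") the tables give SETS1 | USES1 | USES2, and the *_REG
   macros defined earlier in this file extract rn (2) from bits 8-11
   and rm (1) from bits 4-7, so the insn uses r1 and r2 and sets r2.  */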
2126
2127 /* See whether an instruction sets a general purpose register. */
2128
2129 static bfd_boolean
2130 sh_insn_sets_reg (insn, op, reg)
2131 unsigned int insn;
2132 const struct sh_opcode *op;
2133 unsigned int reg;
2134 {
2135 unsigned int f;
2136
2137 f = op->flags;
2138
2139 if ((f & SETS1) != 0
2140 && SETS1_REG (insn) == reg)
2141 return TRUE;
2142 if ((f & SETS2) != 0
2143 && SETS2_REG (insn) == reg)
2144 return TRUE;
2145 if ((f & SETSR0) != 0
2146 && reg == 0)
2147 return TRUE;
2148 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2149 return TRUE;
2150
2151 return FALSE;
2152 }
2153
2154 /* See whether an instruction uses or sets a floating point register.  */
2155
2156 static bfd_boolean
2157 sh_insn_uses_or_sets_freg (insn, op, reg)
2158 unsigned int insn;
2159 const struct sh_opcode *op;
2160 unsigned int reg;
2161 {
2162 if (sh_insn_uses_freg (insn, op, reg))
2163 return TRUE;
2164
2165 return sh_insn_sets_freg (insn, op, reg);
2166 }
2167
2168 /* See whether an instruction uses a floating point register. */
2169
2170 static bfd_boolean
2171 sh_insn_uses_freg (insn, op, freg)
2172 unsigned int insn;
2173 const struct sh_opcode *op;
2174 unsigned int freg;
2175 {
2176 unsigned int f;
2177
2178 f = op->flags;
2179
2180 /* We can't tell if this is a double-precision insn, so just play safe
2181 and assume that it might be.  That means that as well as testing
2182 FREG against itself, we must also test an even FREG against FREG+1
2183 - in case the using insn uses just the low part of a double
2184 precision value - and an odd FREG against FREG-1 - in case the
2185 setting insn sets just the low part of a double precision value.
2186 What this all boils down to is that we have to ignore the lowest
2187 bit of the register number.  */
2188
2189 if ((f & USESF1) != 0
2190 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2191 return TRUE;
2192 if ((f & USESF2) != 0
2193 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2194 return TRUE;
2195 if ((f & USESF0) != 0
2196 && freg == 0)
2197 return TRUE;
2198
2199 return FALSE;
2200 }
2201
2202 /* See whether an instruction sets a floating point register. */
2203
2204 static bfd_boolean
2205 sh_insn_sets_freg (insn, op, freg)
2206 unsigned int insn;
2207 const struct sh_opcode *op;
2208 unsigned int freg;
2209 {
2210 unsigned int f;
2211
2212 f = op->flags;
2213
2214 /* We can't tell if this is a double-precision insn, so just play safe
2215 and assume that it might be.  That means that as well as testing
2216 FREG against itself, we must also test an even FREG against FREG+1
2217 - in case the using insn uses just the low part of a double
2218 precision value - and an odd FREG against FREG-1 - in case the
2219 setting insn sets just the low part of a double precision value.
2220 What this all boils down to is that we have to ignore the lowest
2221 bit of the register number.  */
2222
2223 if ((f & SETSF1) != 0
2224 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2225 return TRUE;
2226
2227 return FALSE;
2228 }
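
/* A worked example of the masking above: if a double precision insn
   sets DR4, i.e. the register pair fr4/fr5, both freg 4 and freg 5
   must be treated as set.  Since 4 & 0xe and 5 & 0xe are both 4, the
   comparison of (SETSF1_REG (insn) & 0xe) with (freg & 0xe) catches
   either half of the pair, at the price of an occasional false
   positive for purely single precision code.  */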
2229
2230 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2231 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2232 This should return TRUE if there is a conflict, or FALSE if the
2233 instructions can be swapped safely. */
2234
2235 static bfd_boolean
2236 sh_insns_conflict (i1, op1, i2, op2)
2237 unsigned int i1;
2238 const struct sh_opcode *op1;
2239 unsigned int i2;
2240 const struct sh_opcode *op2;
2241 {
2242 unsigned int f1, f2;
2243
2244 f1 = op1->flags;
2245 f2 = op2->flags;
2246
2247 /* Load of fpscr conflicts with floating point operations.
2248 FIXME: shouldn't test raw opcodes here. */
2249 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2250 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2251 return TRUE;
2252
2253 if ((f1 & (BRANCH | DELAY)) != 0
2254 || (f2 & (BRANCH | DELAY)) != 0)
2255 return TRUE;
2256
2257 if (((f1 | f2) & SETSSP)
2258 && (f1 & (SETSSP | USESSP))
2259 && (f2 & (SETSSP | USESSP)))
2260 return TRUE;
2261
2262 if ((f1 & SETS1) != 0
2263 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2264 return TRUE;
2265 if ((f1 & SETS2) != 0
2266 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2267 return TRUE;
2268 if ((f1 & SETSR0) != 0
2269 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2270 return TRUE;
2271 if ((f1 & SETSAS)
2272 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2273 return TRUE;
2274 if ((f1 & SETSF1) != 0
2275 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2276 return TRUE;
2277
2278 if ((f2 & SETS1) != 0
2279 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2280 return TRUE;
2281 if ((f2 & SETS2) != 0
2282 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2283 return TRUE;
2284 if ((f2 & SETSR0) != 0
2285 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2286 return TRUE;
2287 if ((f2 & SETSAS)
2288 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2289 return TRUE;
2290 if ((f2 & SETSF1) != 0
2291 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2292 return TRUE;
2293
2294 /* The instructions do not conflict. */
2295 return FALSE;
2296 }
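
/* For illustration, an assumed pair of instructions and what the test
   above concludes about them:

     i1 = 0x321c;   add r1,r2  (sets r2)
     i2 = 0x6423;   mov r2,r4  (uses r2)

   I1 sets r2 and I2 uses it, so sh_insns_conflict returns TRUE and the
   pair is never reordered.  Replacing I2 with, say, "mov r5,r6"
   (0x6653) removes the register overlap, the function returns FALSE,
   and a swap becomes possible.  */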
2297
2298 /* I1 is a load instruction, and I2 is some other instruction. Return
2299 TRUE if I1 loads a register which I2 uses. */
2300
2301 static bfd_boolean
2302 sh_load_use (i1, op1, i2, op2)
2303 unsigned int i1;
2304 const struct sh_opcode *op1;
2305 unsigned int i2;
2306 const struct sh_opcode *op2;
2307 {
2308 unsigned int f1;
2309
2310 f1 = op1->flags;
2311
2312 if ((f1 & LOAD) == 0)
2313 return FALSE;
2314
2315 /* If both SETS1 and SETSSP are set, that means a load to a special
2316 register using postincrement addressing mode, which we don't care
2317 about here. */
2318 if ((f1 & SETS1) != 0
2319 && (f1 & SETSSP) == 0
2320 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2321 return TRUE;
2322
2323 if ((f1 & SETSR0) != 0
2324 && sh_insn_uses_reg (i2, op2, 0))
2325 return TRUE;
2326
2327 if ((f1 & SETSF1) != 0
2328 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2329 return TRUE;
2330
2331 return FALSE;
2332 }
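
/* For example (assumed encodings): with i1 = 0x6212 ("mov.l @r1,r2",
   LOAD | SETS1 | USES2) and i2 = 0x332c ("add r2,r3"), i1 loads r2 and
   i2 uses it, so sh_load_use returns TRUE; placing i2 directly after
   i1 would cause a load-use pipeline bubble, and the alignment code
   below avoids creating such a pair.  */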
2333
2334 /* Try to align loads and stores within a span of memory. This is
2335 called by both the ELF and the COFF sh targets. ABFD and SEC are
2336 the BFD and section we are examining. CONTENTS is the contents of
2337 the section. SWAP is the routine to call to swap two instructions.
2338 RELOCS is a pointer to the internal relocation information, to be
2339 passed to SWAP. PLABEL is a pointer to the current label in a
2340 sorted list of labels; LABEL_END is the end of the list. START and
2341 STOP are the range of memory to examine. If a swap is made,
2342 *PSWAPPED is set to TRUE. */
2343
2344 #ifdef COFF_WITH_PE
2345 static
2346 #endif
2347 bfd_boolean
2348 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2349 plabel, label_end, start, stop, pswapped)
2350 bfd *abfd;
2351 asection *sec;
2352 bfd_byte *contents;
2353 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2354 PTR relocs;
2355 bfd_vma **plabel;
2356 bfd_vma *label_end;
2357 bfd_vma start;
2358 bfd_vma stop;
2359 bfd_boolean *pswapped;
2360 {
2361 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2362 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2363 bfd_vma i;
2364
2365 /* The SH4 has a Harvard architecture, hence aligning loads is not
2366 desirable. In fact, it is counter-productive, since it interferes
2367 with the schedules generated by the compiler. */
2368 if (abfd->arch_info->mach == bfd_mach_sh4)
2369 return TRUE;
2370
2371 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2372 instructions. */
2373 if (dsp)
2374 {
2375 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2376 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2377 }
2378
2379 /* Instructions should be aligned on 2 byte boundaries. */
2380 if ((start & 1) == 1)
2381 ++start;
2382
2383 /* Now look through the unaligned addresses. */
2384 i = start;
2385 if ((i & 2) == 0)
2386 i += 2;
2387 for (; i < stop; i += 4)
2388 {
2389 unsigned int insn;
2390 const struct sh_opcode *op;
2391 unsigned int prev_insn = 0;
2392 const struct sh_opcode *prev_op = NULL;
2393
2394 insn = bfd_get_16 (abfd, contents + i);
2395 op = sh_insn_info (insn);
2396 if (op == NULL
2397 || (op->flags & (LOAD | STORE)) == 0)
2398 continue;
2399
2400 /* This is a load or store which is not on a four byte boundary. */
2401
2402 while (*plabel < label_end && **plabel < i)
2403 ++*plabel;
2404
2405 if (i > start)
2406 {
2407 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2408 /* If INSN is the field b of a parallel processing insn, it is not
2409 a load / store after all. Note that the test here might mistake
2410 the field_b of a pcopy insn for the starting code of a parallel
2411 processing insn; this might miss a swapping opportunity, but at
2412 least we're on the safe side. */
2413 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2414 continue;
2415
2416 /* Check if prev_insn is actually the field b of a parallel
2417 processing insn. Again, this can give a spurious match
2418 after a pcopy. */
2419 if (dsp && i - 2 > start)
2420 {
2421 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2422
2423 if ((pprev_insn & 0xfc00) == 0xf800)
2424 prev_op = NULL;
2425 else
2426 prev_op = sh_insn_info (prev_insn);
2427 }
2428 else
2429 prev_op = sh_insn_info (prev_insn);
2430
2431 /* If the load/store instruction is in a delay slot, we
2432 can't swap. */
2433 if (prev_op == NULL
2434 || (prev_op->flags & DELAY) != 0)
2435 continue;
2436 }
2437 if (i > start
2438 && (*plabel >= label_end || **plabel != i)
2439 && prev_op != NULL
2440 && (prev_op->flags & (LOAD | STORE)) == 0
2441 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2442 {
2443 bfd_boolean ok;
2444
2445 /* The load/store instruction does not have a label, and
2446 there is a previous instruction; PREV_INSN is not
2447 itself a load/store instruction, and PREV_INSN and
2448 INSN do not conflict. */
2449
2450 ok = TRUE;
2451
2452 if (i >= start + 4)
2453 {
2454 unsigned int prev2_insn;
2455 const struct sh_opcode *prev2_op;
2456
2457 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2458 prev2_op = sh_insn_info (prev2_insn);
2459
2460 /* If the instruction before PREV_INSN has a delay
2461 slot--that is, PREV_INSN is in a delay slot--we
2462 can not swap. */
2463 if (prev2_op == NULL
2464 || (prev2_op->flags & DELAY) != 0)
2465 ok = FALSE;
2466
2467 /* If the instruction before PREV_INSN is a load,
2468 and it sets a register which INSN uses, then
2469 putting INSN immediately after PREV_INSN will
2470 cause a pipeline bubble, so there is no point to
2471 making the swap. */
2472 if (ok
2473 && (prev2_op->flags & LOAD) != 0
2474 && sh_load_use (prev2_insn, prev2_op, insn, op))
2475 ok = FALSE;
2476 }
2477
2478 if (ok)
2479 {
2480 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2481 return FALSE;
2482 *pswapped = TRUE;
2483 continue;
2484 }
2485 }
2486
2487 while (*plabel < label_end && **plabel < i + 2)
2488 ++*plabel;
2489
2490 if (i + 2 < stop
2491 && (*plabel >= label_end || **plabel != i + 2))
2492 {
2493 unsigned int next_insn;
2494 const struct sh_opcode *next_op;
2495
2496 /* There is an instruction after the load/store
2497 instruction, and it does not have a label. */
2498 next_insn = bfd_get_16 (abfd, contents + i + 2);
2499 next_op = sh_insn_info (next_insn);
2500 if (next_op != NULL
2501 && (next_op->flags & (LOAD | STORE)) == 0
2502 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2503 {
2504 bfd_boolean ok;
2505
2506 /* NEXT_INSN is not itself a load/store instruction,
2507 and it does not conflict with INSN. */
2508
2509 ok = TRUE;
2510
2511 /* If PREV_INSN is a load, and it sets a register
2512 which NEXT_INSN uses, then putting NEXT_INSN
2513 immediately after PREV_INSN will cause a pipeline
2514 bubble, so there is no reason to make this swap. */
2515 if (prev_op != NULL
2516 && (prev_op->flags & LOAD) != 0
2517 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2518 ok = FALSE;
2519
2520 /* If INSN is a load, and it sets a register which
2521 the insn after NEXT_INSN uses, then doing the
2522 swap will cause a pipeline bubble, so there is no
2523 reason to make the swap. However, if the insn
2524 after NEXT_INSN is itself a load or store
2525 instruction, then it is misaligned, so
2526 optimistically hope that it will be swapped
2527 itself, and just live with the pipeline bubble if
2528 it isn't. */
2529 if (ok
2530 && i + 4 < stop
2531 && (op->flags & LOAD) != 0)
2532 {
2533 unsigned int next2_insn;
2534 const struct sh_opcode *next2_op;
2535
2536 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2537 next2_op = sh_insn_info (next2_insn);
2538 if (next2_op == NULL
2539 || ((next2_op->flags & (LOAD | STORE)) == 0
2540 && sh_load_use (insn, op, next2_insn, next2_op)))
2541 ok = FALSE;
2542 }
2543
2544 if (ok)
2545 {
2546 if (! (*swap) (abfd, sec, relocs, contents, i))
2547 return FALSE;
2548 *pswapped = TRUE;
2549 continue;
2550 }
2551 }
2552 }
2553 }
2554
2555 return TRUE;
2556 }
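
/* A small worked example of the scan above, assuming the span starts
   on a four byte boundary at offset 0: the loop visits offsets 2, 6,
   10, ... (the misaligned instruction slots).  If the insn at such an
   offset is a load or store, the code first tries to swap it with the
   preceding insn (the swap at i - 2), which moves it down onto a four
   byte boundary; failing that, it tries to swap it with the following
   insn (the swap at i), which moves it up onto one.  Labels, delay
   slots, register conflicts and load-use stalls veto a swap.  */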
2557 #endif /* not COFF_IMAGE_WITH_PE */
2558
2559 /* Look for loads and stores which we can align to four byte
2560 boundaries. See the longer comment above sh_relax_section for why
2561 this is desirable. This sets *PSWAPPED if some instruction was
2562 swapped. */
2563
2564 static bfd_boolean
2565 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2566 bfd *abfd;
2567 asection *sec;
2568 struct internal_reloc *internal_relocs;
2569 bfd_byte *contents;
2570 bfd_boolean *pswapped;
2571 {
2572 struct internal_reloc *irel, *irelend;
2573 bfd_vma *labels = NULL;
2574 bfd_vma *label, *label_end;
2575 bfd_size_type amt;
2576
2577 *pswapped = FALSE;
2578
2579 irelend = internal_relocs + sec->reloc_count;
2580
2581 /* Get all the addresses with labels on them. */
2582 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2583 labels = (bfd_vma *) bfd_malloc (amt);
2584 if (labels == NULL)
2585 goto error_return;
2586 label_end = labels;
2587 for (irel = internal_relocs; irel < irelend; irel++)
2588 {
2589 if (irel->r_type == R_SH_LABEL)
2590 {
2591 *label_end = irel->r_vaddr - sec->vma;
2592 ++label_end;
2593 }
2594 }
2595
2596 /* Note that the assembler currently always outputs relocs in
2597 address order. If that ever changes, this code will need to sort
2598 the label values and the relocs. */
2599
2600 label = labels;
2601
2602 for (irel = internal_relocs; irel < irelend; irel++)
2603 {
2604 bfd_vma start, stop;
2605
2606 if (irel->r_type != R_SH_CODE)
2607 continue;
2608
2609 start = irel->r_vaddr - sec->vma;
2610
2611 for (irel++; irel < irelend; irel++)
2612 if (irel->r_type == R_SH_DATA)
2613 break;
2614 if (irel < irelend)
2615 stop = irel->r_vaddr - sec->vma;
2616 else
2617 stop = sec->size;
2618
2619 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2620 (PTR) internal_relocs, &label,
2621 label_end, start, stop, pswapped))
2622 goto error_return;
2623 }
2624
2625 free (labels);
2626
2627 return TRUE;
2628
2629 error_return:
2630 if (labels != NULL)
2631 free (labels);
2632 return FALSE;
2633 }
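
/* As a sketch of how the relocs drive the loop above (the addresses
   here are made up): R_SH_CODE marks the start of a run of
   instructions, R_SH_DATA the start of embedded constants, and
   R_SH_LABEL each branch target.  Given relocs at, say, R_SH_CODE 0x0,
   R_SH_LABEL 0x14 and R_SH_DATA 0x40, the loop calls
   _bfd_sh_align_load_span once with start 0 and stop 0x40, and the
   label at 0x14 prevents any swap across that address.  */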
2634
2635 /* Swap two SH instructions. */
2636
2637 static bfd_boolean
2638 sh_swap_insns (abfd, sec, relocs, contents, addr)
2639 bfd *abfd;
2640 asection *sec;
2641 PTR relocs;
2642 bfd_byte *contents;
2643 bfd_vma addr;
2644 {
2645 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2646 unsigned short i1, i2;
2647 struct internal_reloc *irel, *irelend;
2648
2649 /* Swap the instructions themselves. */
2650 i1 = bfd_get_16 (abfd, contents + addr);
2651 i2 = bfd_get_16 (abfd, contents + addr + 2);
2652 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2653 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2654
2655 /* Adjust all reloc addresses. */
2656 irelend = internal_relocs + sec->reloc_count;
2657 for (irel = internal_relocs; irel < irelend; irel++)
2658 {
2659 int type, add;
2660
2661 /* There are a few special types of relocs that we don't want to
2662 adjust. These relocs do not apply to the instruction itself,
2663 but are only associated with the address. */
2664 type = irel->r_type;
2665 if (type == R_SH_ALIGN
2666 || type == R_SH_CODE
2667 || type == R_SH_DATA
2668 || type == R_SH_LABEL)
2669 continue;
2670
2671 /* If an R_SH_USES reloc points to one of the addresses being
2672 swapped, we must adjust it. It would be incorrect to do this
2673 for a jump, though, since we want to execute both
2674 instructions after the jump. (We have avoided swapping
2675 around a label, so the jump will not wind up executing an
2676 instruction it shouldn't). */
2677 if (type == R_SH_USES)
2678 {
2679 bfd_vma off;
2680
2681 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2682 if (off == addr)
2683 irel->r_offset += 2;
2684 else if (off == addr + 2)
2685 irel->r_offset -= 2;
2686 }
2687
2688 if (irel->r_vaddr - sec->vma == addr)
2689 {
2690 irel->r_vaddr += 2;
2691 add = -2;
2692 }
2693 else if (irel->r_vaddr - sec->vma == addr + 2)
2694 {
2695 irel->r_vaddr -= 2;
2696 add = 2;
2697 }
2698 else
2699 add = 0;
2700
2701 if (add != 0)
2702 {
2703 bfd_byte *loc;
2704 unsigned short insn, oinsn;
2705 bfd_boolean overflow;
2706
2707 loc = contents + irel->r_vaddr - sec->vma;
2708 overflow = FALSE;
2709 switch (type)
2710 {
2711 default:
2712 break;
2713
2714 case R_SH_PCDISP8BY2:
2715 case R_SH_PCRELIMM8BY2:
2716 insn = bfd_get_16 (abfd, loc);
2717 oinsn = insn;
2718 insn += add / 2;
2719 if ((oinsn & 0xff00) != (insn & 0xff00))
2720 overflow = TRUE;
2721 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2722 break;
2723
2724 case R_SH_PCDISP:
2725 insn = bfd_get_16 (abfd, loc);
2726 oinsn = insn;
2727 insn += add / 2;
2728 if ((oinsn & 0xf000) != (insn & 0xf000))
2729 overflow = TRUE;
2730 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2731 break;
2732
2733 case R_SH_PCRELIMM8BY4:
2734 /* This reloc ignores the least significant 3 bits of
2735 the program counter before adding in the offset.
2736 This means that if ADDR is on a four byte boundary, the
2737 swap will not affect the offset.  If ADDR is not on a
2738 four byte boundary, then the two swapped insns straddle
2739 one, and the offset must be adjusted.  */
2740 if ((addr & 3) != 0)
2741 {
2742 insn = bfd_get_16 (abfd, loc);
2743 oinsn = insn;
2744 insn += add / 2;
2745 if ((oinsn & 0xff00) != (insn & 0xff00))
2746 overflow = TRUE;
2747 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2748 }
2749
2750 break;
2751 }
2752
2753 if (overflow)
2754 {
2755 ((*_bfd_error_handler)
2756 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2757 abfd, (unsigned long) irel->r_vaddr));
2758 bfd_set_error (bfd_error_bad_value);
2759 return FALSE;
2760 }
2761 }
2762 }
2763
2764 return TRUE;
2765 }
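
/* For example (addresses assumed): if the insn at ADDR is a "bt label"
   carrying an R_SH_PCDISP8BY2 reloc, the swap moves it to ADDR + 2;
   its r_vaddr is bumped by 2, and since the displacement is measured
   from the insn's own address, the 8 bit displacement field must be
   decremented by one 16 bit slot (insn += add / 2 with add == -2).
   The overflow check catches the rare case where the displacement was
   already at the limit of its range.  */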
2766 \f
2767 /* This is a modification of _bfd_coff_generic_relocate_section, which
2768 will handle SH relaxing. */
2769
2770 static bfd_boolean
2771 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2772 relocs, syms, sections)
2773 bfd *output_bfd ATTRIBUTE_UNUSED;
2774 struct bfd_link_info *info;
2775 bfd *input_bfd;
2776 asection *input_section;
2777 bfd_byte *contents;
2778 struct internal_reloc *relocs;
2779 struct internal_syment *syms;
2780 asection **sections;
2781 {
2782 struct internal_reloc *rel;
2783 struct internal_reloc *relend;
2784
2785 rel = relocs;
2786 relend = rel + input_section->reloc_count;
2787 for (; rel < relend; rel++)
2788 {
2789 long symndx;
2790 struct coff_link_hash_entry *h;
2791 struct internal_syment *sym;
2792 bfd_vma addend;
2793 bfd_vma val;
2794 reloc_howto_type *howto;
2795 bfd_reloc_status_type rstat;
2796
2797 /* Almost all relocs have to do with relaxing. If any work must
2798 be done for them, it has been done in sh_relax_section. */
2799 if (rel->r_type != R_SH_IMM32
2800 #ifdef COFF_WITH_PE
2801 && rel->r_type != R_SH_IMM32CE
2802 && rel->r_type != R_SH_IMAGEBASE
2803 #endif
2804 && rel->r_type != R_SH_PCDISP)
2805 continue;
2806
2807 symndx = rel->r_symndx;
2808
2809 if (symndx == -1)
2810 {
2811 h = NULL;
2812 sym = NULL;
2813 }
2814 else
2815 {
2816 if (symndx < 0
2817 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2818 {
2819 (*_bfd_error_handler)
2820 ("%B: illegal symbol index %ld in relocs",
2821 input_bfd, symndx);
2822 bfd_set_error (bfd_error_bad_value);
2823 return FALSE;
2824 }
2825 h = obj_coff_sym_hashes (input_bfd)[symndx];
2826 sym = syms + symndx;
2827 }
2828
2829 if (sym != NULL && sym->n_scnum != 0)
2830 addend = - sym->n_value;
2831 else
2832 addend = 0;
2833
2834 if (rel->r_type == R_SH_PCDISP)
2835 addend -= 4;
2836
2837 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2838 howto = NULL;
2839 else
2840 howto = &sh_coff_howtos[rel->r_type];
2841
2842 if (howto == NULL)
2843 {
2844 bfd_set_error (bfd_error_bad_value);
2845 return FALSE;
2846 }
2847
2848 #ifdef COFF_WITH_PE
2849 if (rel->r_type == R_SH_IMAGEBASE)
2850 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2851 #endif
2852
2853 val = 0;
2854
2855 if (h == NULL)
2856 {
2857 asection *sec;
2858
2859 /* There is nothing to do for an internal PCDISP reloc. */
2860 if (rel->r_type == R_SH_PCDISP)
2861 continue;
2862
2863 if (symndx == -1)
2864 {
2865 sec = bfd_abs_section_ptr;
2866 val = 0;
2867 }
2868 else
2869 {
2870 sec = sections[symndx];
2871 val = (sec->output_section->vma
2872 + sec->output_offset
2873 + sym->n_value
2874 - sec->vma);
2875 }
2876 }
2877 else
2878 {
2879 if (h->root.type == bfd_link_hash_defined
2880 || h->root.type == bfd_link_hash_defweak)
2881 {
2882 asection *sec;
2883
2884 sec = h->root.u.def.section;
2885 val = (h->root.u.def.value
2886 + sec->output_section->vma
2887 + sec->output_offset);
2888 }
2889 else if (! info->relocatable)
2890 {
2891 if (! ((*info->callbacks->undefined_symbol)
2892 (info, h->root.root.string, input_bfd, input_section,
2893 rel->r_vaddr - input_section->vma, TRUE)))
2894 return FALSE;
2895 }
2896 }
2897
2898 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2899 contents,
2900 rel->r_vaddr - input_section->vma,
2901 val, addend);
2902
2903 switch (rstat)
2904 {
2905 default:
2906 abort ();
2907 case bfd_reloc_ok:
2908 break;
2909 case bfd_reloc_overflow:
2910 {
2911 const char *name;
2912 char buf[SYMNMLEN + 1];
2913
2914 if (symndx == -1)
2915 name = "*ABS*";
2916 else if (h != NULL)
2917 name = NULL;
2918 else if (sym->_n._n_n._n_zeroes == 0
2919 && sym->_n._n_n._n_offset != 0)
2920 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2921 else
2922 {
2923 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2924 buf[SYMNMLEN] = '\0';
2925 name = buf;
2926 }
2927
2928 if (! ((*info->callbacks->reloc_overflow)
2929 (info, (h ? &h->root : NULL), name, howto->name,
2930 (bfd_vma) 0, input_bfd, input_section,
2931 rel->r_vaddr - input_section->vma)))
2932 return FALSE;
2933 }
2934 }
2935 }
2936
2937 return TRUE;
2938 }
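
/* Two details of the addend handling above, for clarity: the
   "addend -= 4" for R_SH_PCDISP reflects the SH convention that a
   branch displacement is measured from the address of the branch plus
   4, and the ImageBase subtraction for R_SH_IMAGEBASE turns an
   absolute address into an RVA, which is what the PE format expects
   in that field.  */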
2939
2940 /* This is a version of bfd_generic_get_relocated_section_contents
2941 which uses sh_relocate_section. */
2942
2943 static bfd_byte *
2944 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2945 data, relocatable, symbols)
2946 bfd *output_bfd;
2947 struct bfd_link_info *link_info;
2948 struct bfd_link_order *link_order;
2949 bfd_byte *data;
2950 bfd_boolean relocatable;
2951 asymbol **symbols;
2952 {
2953 asection *input_section = link_order->u.indirect.section;
2954 bfd *input_bfd = input_section->owner;
2955 asection **sections = NULL;
2956 struct internal_reloc *internal_relocs = NULL;
2957 struct internal_syment *internal_syms = NULL;
2958
2959 /* We only need to handle the case of relaxing, or of having a
2960 particular set of section contents, specially. */
2961 if (relocatable
2962 || coff_section_data (input_bfd, input_section) == NULL
2963 || coff_section_data (input_bfd, input_section)->contents == NULL)
2964 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2965 link_order, data,
2966 relocatable,
2967 symbols);
2968
2969 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2970 (size_t) input_section->size);
2971
2972 if ((input_section->flags & SEC_RELOC) != 0
2973 && input_section->reloc_count > 0)
2974 {
2975 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2976 bfd_byte *esym, *esymend;
2977 struct internal_syment *isymp;
2978 asection **secpp;
2979 bfd_size_type amt;
2980
2981 if (! _bfd_coff_get_external_symbols (input_bfd))
2982 goto error_return;
2983
2984 internal_relocs = (_bfd_coff_read_internal_relocs
2985 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2986 FALSE, (struct internal_reloc *) NULL));
2987 if (internal_relocs == NULL)
2988 goto error_return;
2989
2990 amt = obj_raw_syment_count (input_bfd);
2991 amt *= sizeof (struct internal_syment);
2992 internal_syms = (struct internal_syment *) bfd_malloc (amt);
2993 if (internal_syms == NULL)
2994 goto error_return;
2995
2996 amt = obj_raw_syment_count (input_bfd);
2997 amt *= sizeof (asection *);
2998 sections = (asection **) bfd_malloc (amt);
2999 if (sections == NULL)
3000 goto error_return;
3001
3002 isymp = internal_syms;
3003 secpp = sections;
3004 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3005 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3006 while (esym < esymend)
3007 {
3008 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3009
3010 if (isymp->n_scnum != 0)
3011 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3012 else
3013 {
3014 if (isymp->n_value == 0)
3015 *secpp = bfd_und_section_ptr;
3016 else
3017 *secpp = bfd_com_section_ptr;
3018 }
3019
3020 esym += (isymp->n_numaux + 1) * symesz;
3021 secpp += isymp->n_numaux + 1;
3022 isymp += isymp->n_numaux + 1;
3023 }
3024
3025 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3026 input_section, data, internal_relocs,
3027 internal_syms, sections))
3028 goto error_return;
3029
3030 free (sections);
3031 sections = NULL;
3032 free (internal_syms);
3033 internal_syms = NULL;
3034 free (internal_relocs);
3035 internal_relocs = NULL;
3036 }
3037
3038 return data;
3039
3040 error_return:
3041 if (internal_relocs != NULL)
3042 free (internal_relocs);
3043 if (internal_syms != NULL)
3044 free (internal_syms);
3045 if (sections != NULL)
3046 free (sections);
3047 return NULL;
3048 }
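
/* The reason for the special case above: when the relaxation code has
   modified a section it caches the edited bytes in
   coff_section_data (abfd, section)->contents, which is exactly what
   the test at the top of this function detects.  The generic routine
   would re-read the original, pre-relaxation contents, so this version
   relocates the cached copy instead and callers see the section as it
   will actually be laid out.  */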
3049
3050 /* The target vectors. */
3051
3052 #ifndef TARGET_SHL_SYM
3053 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3054 #endif
3055
3056 #ifdef TARGET_SHL_SYM
3057 #define TARGET_SYM TARGET_SHL_SYM
3058 #else
3059 #define TARGET_SYM shlcoff_vec
3060 #endif
3061
3062 #ifndef TARGET_SHL_NAME
3063 #define TARGET_SHL_NAME "coff-shl"
3064 #endif
3065
3066 #ifdef COFF_WITH_PE
3067 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3068 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3069 #else
3070 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3071 0, '_', NULL, COFF_SWAP_TABLE)
3072 #endif
3073
3074 #ifndef TARGET_SHL_SYM
3075 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3076 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3077 /* Some people want versions of the SH COFF target which do not align
3078 to 16 byte boundaries. We implement that by adding a couple of new
3079 target vectors. These are just like the ones above, but they
3080 change the default section alignment. To generate them in the
3081 assembler, use -small. To use them in the linker, use -b
3082 coff-sh{l}-small and -oformat coff-sh{l}-small.
3083
3084 Yes, this is a horrible hack. A general solution for setting
3085 section alignment in COFF is rather complex. ELF handles this
3086 correctly. */
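
/* For instance (the file names here are made up), the big endian small
   variant described above would be produced roughly like this:

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o

   using only the options already mentioned above.  */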
3087
3088 /* Only recognize the small versions if the target was not defaulted.
3089 Otherwise we won't recognize the non default endianness. */
3090
3091 static const bfd_target *
3092 coff_small_object_p (abfd)
3093 bfd *abfd;
3094 {
3095 if (abfd->target_defaulted)
3096 {
3097 bfd_set_error (bfd_error_wrong_format);
3098 return NULL;
3099 }
3100 return coff_object_p (abfd);
3101 }
3102
3103 /* Set the section alignment for the small versions. */
3104
3105 static bfd_boolean
3106 coff_small_new_section_hook (abfd, section)
3107 bfd *abfd;
3108 asection *section;
3109 {
3110 if (! coff_new_section_hook (abfd, section))
3111 return FALSE;
3112
3113 /* We must align to at least a four byte boundary, because longword
3114 accesses must be on a four byte boundary. */
3115 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3116 section->alignment_power = 2;
3117
3118 return TRUE;
3119 }
3120
3121 /* This is copied from bfd_coff_std_swap_table so that we can change
3122 the default section alignment power. */
3123
3124 static bfd_coff_backend_data bfd_coff_small_swap_table =
3125 {
3126 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3127 coff_swap_aux_out, coff_swap_sym_out,
3128 coff_swap_lineno_out, coff_swap_reloc_out,
3129 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3130 coff_swap_scnhdr_out,
3131 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3132 #ifdef COFF_LONG_FILENAMES
3133 TRUE,
3134 #else
3135 FALSE,
3136 #endif
3137 COFF_DEFAULT_LONG_SECTION_NAMES,
3138 2,
3139 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3140 TRUE,
3141 #else
3142 FALSE,
3143 #endif
3144 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3145 4,
3146 #else
3147 2,
3148 #endif
3149 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3150 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3151 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3152 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3153 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3154 coff_classify_symbol, coff_compute_section_file_positions,
3155 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3156 coff_adjust_symndx, coff_link_add_one_symbol,
3157 coff_link_output_has_begun, coff_final_link_postscript,
3158 bfd_pe_print_pdata
3159 };
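
/* The intended difference from bfd_coff_std_swap_table is the default
   section alignment power field, the bare "2" following
   COFF_DEFAULT_LONG_SECTION_NAMES above, which replaces
   COFF_DEFAULT_SECTION_ALIGNMENT_POWER so that sections default to
   2**2 = 4 byte alignment.  */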
3160
3161 #define coff_small_close_and_cleanup \
3162 coff_close_and_cleanup
3163 #define coff_small_bfd_free_cached_info \
3164 coff_bfd_free_cached_info
3165 #define coff_small_get_section_contents \
3166 coff_get_section_contents
3167 #define coff_small_get_section_contents_in_window \
3168 coff_get_section_contents_in_window
3169
3170 extern const bfd_target shlcoff_small_vec;
3171
3172 const bfd_target shcoff_small_vec =
3173 {
3174 "coff-sh-small", /* name */
3175 bfd_target_coff_flavour,
3176 BFD_ENDIAN_BIG, /* data byte order is big */
3177 BFD_ENDIAN_BIG, /* header byte order is big */
3178
3179 (HAS_RELOC | EXEC_P | /* object flags */
3180 HAS_LINENO | HAS_DEBUG |
3181 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3182
3183 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3184 '_', /* leading symbol underscore */
3185 '/', /* ar_pad_char */
3186 15, /* ar_max_namelen */
3187 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3188 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3189 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3190 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3191 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3192 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3193
3194 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3195 bfd_generic_archive_p, _bfd_dummy_target},
3196 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3197 bfd_false},
3198 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3199 _bfd_write_archive_contents, bfd_false},
3200
3201 BFD_JUMP_TABLE_GENERIC (coff_small),
3202 BFD_JUMP_TABLE_COPY (coff),
3203 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3204 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3205 BFD_JUMP_TABLE_SYMBOLS (coff),
3206 BFD_JUMP_TABLE_RELOCS (coff),
3207 BFD_JUMP_TABLE_WRITE (coff),
3208 BFD_JUMP_TABLE_LINK (coff),
3209 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3210
3211 & shlcoff_small_vec,
3212
3213 (PTR) &bfd_coff_small_swap_table
3214 };
3215
3216 const bfd_target shlcoff_small_vec =
3217 {
3218 "coff-shl-small", /* name */
3219 bfd_target_coff_flavour,
3220 BFD_ENDIAN_LITTLE, /* data byte order is little */
3221 BFD_ENDIAN_LITTLE, /* header byte order is little */
3222
3223 (HAS_RELOC | EXEC_P | /* object flags */
3224 HAS_LINENO | HAS_DEBUG |
3225 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3226
3227 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3228 '_', /* leading symbol underscore */
3229 '/', /* ar_pad_char */
3230 15, /* ar_max_namelen */
3231 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3232 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3233 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3234 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3235 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3236 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3237
3238 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3239 bfd_generic_archive_p, _bfd_dummy_target},
3240 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3241 bfd_false},
3242 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3243 _bfd_write_archive_contents, bfd_false},
3244
3245 BFD_JUMP_TABLE_GENERIC (coff_small),
3246 BFD_JUMP_TABLE_COPY (coff),
3247 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3248 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3249 BFD_JUMP_TABLE_SYMBOLS (coff),
3250 BFD_JUMP_TABLE_RELOCS (coff),
3251 BFD_JUMP_TABLE_WRITE (coff),
3252 BFD_JUMP_TABLE_LINK (coff),
3253 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3254
3255 & shcoff_small_vec,
3256
3257 (PTR) &bfd_coff_small_swap_table
3258 };
3259 #endif