Include "top.h".
[deliverable/binutils-gdb.git] / bfd / coff-sh.c
... / ...
CommitLineData
1/* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7
8 This file is part of BFD, the Binary File Descriptor library.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
23 MA 02110-1301, USA. */
24
25#include "sysdep.h"
26#include "bfd.h"
27#include "libiberty.h"
28#include "libbfd.h"
29#include "bfdlink.h"
30#include "coff/sh.h"
31#include "coff/internal.h"
32
33#ifdef COFF_WITH_PE
34#include "coff/pe.h"
35
36#ifndef COFF_IMAGE_WITH_PE
37static bfd_boolean sh_align_load_span
38 PARAMS ((bfd *, asection *, bfd_byte *,
39 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
40 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
41
42#define _bfd_sh_align_load_span sh_align_load_span
43#endif
44#endif
45
46#include "libcoff.h"
47
48/* Internal functions. */
49static bfd_reloc_status_type sh_reloc
50 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
51static long get_symbol_value PARAMS ((asymbol *));
52static bfd_boolean sh_relax_section
53 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
54static bfd_boolean sh_relax_delete_bytes
55 PARAMS ((bfd *, asection *, bfd_vma, int));
56#ifndef COFF_IMAGE_WITH_PE
57static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
58#endif
59static bfd_boolean sh_align_loads
60 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
61 bfd_boolean *));
62static bfd_boolean sh_swap_insns
63 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
64static bfd_boolean sh_relocate_section
65 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
66 struct internal_reloc *, struct internal_syment *, asection **));
67static bfd_byte *sh_coff_get_relocated_section_contents
68 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
69 bfd_byte *, bfd_boolean, asymbol **));
70static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
71
72#ifdef COFF_WITH_PE
73/* Can't build import tables with 2**4 alignment. */
74#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
75#else
76/* Default section alignment to 2**4. */
77#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
78#endif
79
80#ifdef COFF_IMAGE_WITH_PE
81/* Align PE executables. */
82#define COFF_PAGE_SIZE 0x1000
83#endif
84
85/* Generate long file names. */
86#define COFF_LONG_FILENAMES
87
88#ifdef COFF_WITH_PE
89static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
90/* Return TRUE if this relocation should
91 appear in the output .reloc section. */
92static bfd_boolean in_reloc_p (abfd, howto)
93 bfd * abfd ATTRIBUTE_UNUSED;
94 reloc_howto_type * howto;
95{
96 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
97}
98#endif
99
100/* The supported relocations. There are a lot of relocations defined
101 in coff/internal.h which we do not expect to ever see. */
102static reloc_howto_type sh_coff_howtos[] =
103{
104 EMPTY_HOWTO (0),
105 EMPTY_HOWTO (1),
106#ifdef COFF_WITH_PE
107 /* Windows CE */
108 HOWTO (R_SH_IMM32CE, /* type */
109 0, /* rightshift */
110 2, /* size (0 = byte, 1 = short, 2 = long) */
111 32, /* bitsize */
112 FALSE, /* pc_relative */
113 0, /* bitpos */
114 complain_overflow_bitfield, /* complain_on_overflow */
115 sh_reloc, /* special_function */
116 "r_imm32ce", /* name */
117 TRUE, /* partial_inplace */
118 0xffffffff, /* src_mask */
119 0xffffffff, /* dst_mask */
120 FALSE), /* pcrel_offset */
121#else
122 EMPTY_HOWTO (2),
123#endif
124 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
125 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
126 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
127 EMPTY_HOWTO (6), /* R_SH_IMM24 */
128 EMPTY_HOWTO (7), /* R_SH_LOW16 */
129 EMPTY_HOWTO (8),
130 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
131
132 HOWTO (R_SH_PCDISP8BY2, /* type */
133 1, /* rightshift */
134 1, /* size (0 = byte, 1 = short, 2 = long) */
135 8, /* bitsize */
136 TRUE, /* pc_relative */
137 0, /* bitpos */
138 complain_overflow_signed, /* complain_on_overflow */
139 sh_reloc, /* special_function */
140 "r_pcdisp8by2", /* name */
141 TRUE, /* partial_inplace */
142 0xff, /* src_mask */
143 0xff, /* dst_mask */
144 TRUE), /* pcrel_offset */
145
146 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
147
148 HOWTO (R_SH_PCDISP, /* type */
149 1, /* rightshift */
150 1, /* size (0 = byte, 1 = short, 2 = long) */
151 12, /* bitsize */
152 TRUE, /* pc_relative */
153 0, /* bitpos */
154 complain_overflow_signed, /* complain_on_overflow */
155 sh_reloc, /* special_function */
156 "r_pcdisp12by2", /* name */
157 TRUE, /* partial_inplace */
158 0xfff, /* src_mask */
159 0xfff, /* dst_mask */
160 TRUE), /* pcrel_offset */
161
162 EMPTY_HOWTO (13),
163
164 HOWTO (R_SH_IMM32, /* type */
165 0, /* rightshift */
166 2, /* size (0 = byte, 1 = short, 2 = long) */
167 32, /* bitsize */
168 FALSE, /* pc_relative */
169 0, /* bitpos */
170 complain_overflow_bitfield, /* complain_on_overflow */
171 sh_reloc, /* special_function */
172 "r_imm32", /* name */
173 TRUE, /* partial_inplace */
174 0xffffffff, /* src_mask */
175 0xffffffff, /* dst_mask */
176 FALSE), /* pcrel_offset */
177
178 EMPTY_HOWTO (15),
179#ifdef COFF_WITH_PE
180 HOWTO (R_SH_IMAGEBASE, /* type */
181 0, /* rightshift */
182 2, /* size (0 = byte, 1 = short, 2 = long) */
183 32, /* bitsize */
184 FALSE, /* pc_relative */
185 0, /* bitpos */
186 complain_overflow_bitfield, /* complain_on_overflow */
187 sh_reloc, /* special_function */
188 "rva32", /* name */
189 TRUE, /* partial_inplace */
190 0xffffffff, /* src_mask */
191 0xffffffff, /* dst_mask */
192 FALSE), /* pcrel_offset */
193#else
194 EMPTY_HOWTO (16), /* R_SH_IMM8 */
195#endif
196 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
197 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
198 EMPTY_HOWTO (19), /* R_SH_IMM4 */
199 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
200 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
201
202 HOWTO (R_SH_PCRELIMM8BY2, /* type */
203 1, /* rightshift */
204 1, /* size (0 = byte, 1 = short, 2 = long) */
205 8, /* bitsize */
206 TRUE, /* pc_relative */
207 0, /* bitpos */
208 complain_overflow_unsigned, /* complain_on_overflow */
209 sh_reloc, /* special_function */
210 "r_pcrelimm8by2", /* name */
211 TRUE, /* partial_inplace */
212 0xff, /* src_mask */
213 0xff, /* dst_mask */
214 TRUE), /* pcrel_offset */
215
216 HOWTO (R_SH_PCRELIMM8BY4, /* type */
217 2, /* rightshift */
218 1, /* size (0 = byte, 1 = short, 2 = long) */
219 8, /* bitsize */
220 TRUE, /* pc_relative */
221 0, /* bitpos */
222 complain_overflow_unsigned, /* complain_on_overflow */
223 sh_reloc, /* special_function */
224 "r_pcrelimm8by4", /* name */
225 TRUE, /* partial_inplace */
226 0xff, /* src_mask */
227 0xff, /* dst_mask */
228 TRUE), /* pcrel_offset */
229
230 HOWTO (R_SH_IMM16, /* type */
231 0, /* rightshift */
232 1, /* size (0 = byte, 1 = short, 2 = long) */
233 16, /* bitsize */
234 FALSE, /* pc_relative */
235 0, /* bitpos */
236 complain_overflow_bitfield, /* complain_on_overflow */
237 sh_reloc, /* special_function */
238 "r_imm16", /* name */
239 TRUE, /* partial_inplace */
240 0xffff, /* src_mask */
241 0xffff, /* dst_mask */
242 FALSE), /* pcrel_offset */
243
244 HOWTO (R_SH_SWITCH16, /* type */
245 0, /* rightshift */
246 1, /* size (0 = byte, 1 = short, 2 = long) */
247 16, /* bitsize */
248 FALSE, /* pc_relative */
249 0, /* bitpos */
250 complain_overflow_bitfield, /* complain_on_overflow */
251 sh_reloc, /* special_function */
252 "r_switch16", /* name */
253 TRUE, /* partial_inplace */
254 0xffff, /* src_mask */
255 0xffff, /* dst_mask */
256 FALSE), /* pcrel_offset */
257
258 HOWTO (R_SH_SWITCH32, /* type */
259 0, /* rightshift */
260 2, /* size (0 = byte, 1 = short, 2 = long) */
261 32, /* bitsize */
262 FALSE, /* pc_relative */
263 0, /* bitpos */
264 complain_overflow_bitfield, /* complain_on_overflow */
265 sh_reloc, /* special_function */
266 "r_switch32", /* name */
267 TRUE, /* partial_inplace */
268 0xffffffff, /* src_mask */
269 0xffffffff, /* dst_mask */
270 FALSE), /* pcrel_offset */
271
272 HOWTO (R_SH_USES, /* type */
273 0, /* rightshift */
274 1, /* size (0 = byte, 1 = short, 2 = long) */
275 16, /* bitsize */
276 FALSE, /* pc_relative */
277 0, /* bitpos */
278 complain_overflow_bitfield, /* complain_on_overflow */
279 sh_reloc, /* special_function */
280 "r_uses", /* name */
281 TRUE, /* partial_inplace */
282 0xffff, /* src_mask */
283 0xffff, /* dst_mask */
284 FALSE), /* pcrel_offset */
285
286 HOWTO (R_SH_COUNT, /* type */
287 0, /* rightshift */
288 2, /* size (0 = byte, 1 = short, 2 = long) */
289 32, /* bitsize */
290 FALSE, /* pc_relative */
291 0, /* bitpos */
292 complain_overflow_bitfield, /* complain_on_overflow */
293 sh_reloc, /* special_function */
294 "r_count", /* name */
295 TRUE, /* partial_inplace */
296 0xffffffff, /* src_mask */
297 0xffffffff, /* dst_mask */
298 FALSE), /* pcrel_offset */
299
300 HOWTO (R_SH_ALIGN, /* type */
301 0, /* rightshift */
302 2, /* size (0 = byte, 1 = short, 2 = long) */
303 32, /* bitsize */
304 FALSE, /* pc_relative */
305 0, /* bitpos */
306 complain_overflow_bitfield, /* complain_on_overflow */
307 sh_reloc, /* special_function */
308 "r_align", /* name */
309 TRUE, /* partial_inplace */
310 0xffffffff, /* src_mask */
311 0xffffffff, /* dst_mask */
312 FALSE), /* pcrel_offset */
313
314 HOWTO (R_SH_CODE, /* type */
315 0, /* rightshift */
316 2, /* size (0 = byte, 1 = short, 2 = long) */
317 32, /* bitsize */
318 FALSE, /* pc_relative */
319 0, /* bitpos */
320 complain_overflow_bitfield, /* complain_on_overflow */
321 sh_reloc, /* special_function */
322 "r_code", /* name */
323 TRUE, /* partial_inplace */
324 0xffffffff, /* src_mask */
325 0xffffffff, /* dst_mask */
326 FALSE), /* pcrel_offset */
327
328 HOWTO (R_SH_DATA, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield, /* complain_on_overflow */
335 sh_reloc, /* special_function */
336 "r_data", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_SH_LABEL, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield, /* complain_on_overflow */
349 sh_reloc, /* special_function */
350 "r_label", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_SH_SWITCH8, /* type */
357 0, /* rightshift */
358 0, /* size (0 = byte, 1 = short, 2 = long) */
359 8, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield, /* complain_on_overflow */
363 sh_reloc, /* special_function */
364 "r_switch8", /* name */
365 TRUE, /* partial_inplace */
366 0xff, /* src_mask */
367 0xff, /* dst_mask */
368 FALSE) /* pcrel_offset */
369};
370
371#define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
372
373/* Check for a bad magic number. */
374#define BADMAG(x) SHBADMAG(x)
375
376/* Customize coffcode.h (this is not currently used). */
377#define SH 1
378
379/* FIXME: This should not be set here. */
380#define __A_MAGIC_SET__
381
382#ifndef COFF_WITH_PE
383/* Swap the r_offset field in and out. */
384#define SWAP_IN_RELOC_OFFSET H_GET_32
385#define SWAP_OUT_RELOC_OFFSET H_PUT_32
386
387/* Swap out extra information in the reloc structure. */
388#define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
389 do \
390 { \
391 dst->r_stuff[0] = 'S'; \
392 dst->r_stuff[1] = 'C'; \
393 } \
394 while (0)
395#endif
396
397/* Get the value of a symbol, when performing a relocation. */
398
399static long
400get_symbol_value (symbol)
401 asymbol *symbol;
402{
403 bfd_vma relocation;
404
405 if (bfd_is_com_section (symbol->section))
406 relocation = 0;
407 else
408 relocation = (symbol->value +
409 symbol->section->output_section->vma +
410 symbol->section->output_offset);
411
412 return relocation;
413}
414
415#ifdef COFF_WITH_PE
416/* Convert an rtype to howto for the COFF backend linker.
417 Copied from coff-i386. */
418#define coff_rtype_to_howto coff_sh_rtype_to_howto
419static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
420
421static reloc_howto_type *
422coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
423 bfd * abfd ATTRIBUTE_UNUSED;
424 asection * sec;
425 struct internal_reloc * rel;
426 struct coff_link_hash_entry * h;
427 struct internal_syment * sym;
428 bfd_vma * addendp;
429{
430 reloc_howto_type * howto;
431
432 howto = sh_coff_howtos + rel->r_type;
433
434 *addendp = 0;
435
436 if (howto->pc_relative)
437 *addendp += sec->vma;
438
439 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
440 {
441 /* This is a common symbol. The section contents include the
442 size (sym->n_value) as an addend. The relocate_section
443 function will be adding in the final value of the symbol. We
444 need to subtract out the current size in order to get the
445 correct result. */
446 BFD_ASSERT (h != NULL);
447 }
448
449 if (howto->pc_relative)
450 {
451 *addendp -= 4;
452
453 /* If the symbol is defined, then the generic code is going to
454 add back the symbol value in order to cancel out an
455 adjustment it made to the addend. However, we set the addend
456 to 0 at the start of this function. We need to adjust here,
457 to avoid the adjustment the generic code will make. FIXME:
458 This is getting a bit hackish. */
459 if (sym != NULL && sym->n_scnum != 0)
460 *addendp -= sym->n_value;
461 }
462
463 if (rel->r_type == R_SH_IMAGEBASE)
464 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
465
466 return howto;
467}
468
469#endif /* COFF_WITH_PE */
470
471/* This structure is used to map BFD reloc codes to SH PE relocs. */
472struct shcoff_reloc_map
473{
474 bfd_reloc_code_real_type bfd_reloc_val;
475 unsigned char shcoff_reloc_val;
476};
477
478#ifdef COFF_WITH_PE
479/* An array mapping BFD reloc codes to SH PE relocs. */
480static const struct shcoff_reloc_map sh_reloc_map[] =
481{
482 { BFD_RELOC_32, R_SH_IMM32CE },
483 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
484 { BFD_RELOC_CTOR, R_SH_IMM32CE },
485};
486#else
487/* An array mapping BFD reloc codes to SH PE relocs. */
488static const struct shcoff_reloc_map sh_reloc_map[] =
489{
490 { BFD_RELOC_32, R_SH_IMM32 },
491 { BFD_RELOC_CTOR, R_SH_IMM32 },
492};
493#endif
494
495/* Given a BFD reloc code, return the howto structure for the
496 corresponding SH PE reloc. */
497#define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
498#define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
499
500static reloc_howto_type *
501sh_coff_reloc_type_lookup (abfd, code)
502 bfd * abfd ATTRIBUTE_UNUSED;
503 bfd_reloc_code_real_type code;
504{
505 unsigned int i;
506
507 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
508 if (sh_reloc_map[i].bfd_reloc_val == code)
509 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
510
511 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
512 return NULL;
513}
514
515static reloc_howto_type *
516sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
517 const char *r_name)
518{
519 unsigned int i;
520
521 for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
522 if (sh_coff_howtos[i].name != NULL
523 && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
524 return &sh_coff_howtos[i];
525
526 return NULL;
527}
528
529/* This macro is used in coffcode.h to get the howto corresponding to
530 an internal reloc. */
531
532#define RTYPE2HOWTO(relent, internal) \
533 ((relent)->howto = \
534 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
535 ? &sh_coff_howtos[(internal)->r_type] \
536 : (reloc_howto_type *) NULL))
537
538/* This is the same as the macro in coffcode.h, except that it copies
539 r_offset into reloc_entry->addend for some relocs. */
540#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
541 { \
542 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
543 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
544 coffsym = (obj_symbols (abfd) \
545 + (cache_ptr->sym_ptr_ptr - symbols)); \
546 else if (ptr) \
547 coffsym = coff_symbol_from (abfd, ptr); \
548 if (coffsym != (coff_symbol_type *) NULL \
549 && coffsym->native->u.syment.n_scnum == 0) \
550 cache_ptr->addend = 0; \
551 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
552 && ptr->section != (asection *) NULL) \
553 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
554 else \
555 cache_ptr->addend = 0; \
556 if ((reloc).r_type == R_SH_SWITCH8 \
557 || (reloc).r_type == R_SH_SWITCH16 \
558 || (reloc).r_type == R_SH_SWITCH32 \
559 || (reloc).r_type == R_SH_USES \
560 || (reloc).r_type == R_SH_COUNT \
561 || (reloc).r_type == R_SH_ALIGN) \
562 cache_ptr->addend = (reloc).r_offset; \
563 }
564
565/* This is the howto function for the SH relocations. */
566
567static bfd_reloc_status_type
568sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
569 error_message)
570 bfd *abfd;
571 arelent *reloc_entry;
572 asymbol *symbol_in;
573 PTR data;
574 asection *input_section;
575 bfd *output_bfd;
576 char **error_message ATTRIBUTE_UNUSED;
577{
578 unsigned long insn;
579 bfd_vma sym_value;
580 unsigned short r_type;
581 bfd_vma addr = reloc_entry->address;
582 bfd_byte *hit_data = addr + (bfd_byte *) data;
583
584 r_type = reloc_entry->howto->type;
585
586 if (output_bfd != NULL)
587 {
588 /* Partial linking--do nothing. */
589 reloc_entry->address += input_section->output_offset;
590 return bfd_reloc_ok;
591 }
592
593 /* Almost all relocs have to do with relaxing. If any work must be
594 done for them, it has been done in sh_relax_section. */
595 if (r_type != R_SH_IMM32
596#ifdef COFF_WITH_PE
597 && r_type != R_SH_IMM32CE
598 && r_type != R_SH_IMAGEBASE
599#endif
600 && (r_type != R_SH_PCDISP
601 || (symbol_in->flags & BSF_LOCAL) != 0))
602 return bfd_reloc_ok;
603
604 if (symbol_in != NULL
605 && bfd_is_und_section (symbol_in->section))
606 return bfd_reloc_undefined;
607
608 sym_value = get_symbol_value (symbol_in);
609
610 switch (r_type)
611 {
612 case R_SH_IMM32:
613#ifdef COFF_WITH_PE
614 case R_SH_IMM32CE:
615#endif
616 insn = bfd_get_32 (abfd, hit_data);
617 insn += sym_value + reloc_entry->addend;
618 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
619 break;
620#ifdef COFF_WITH_PE
621 case R_SH_IMAGEBASE:
622 insn = bfd_get_32 (abfd, hit_data);
623 insn += sym_value + reloc_entry->addend;
624 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
625 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
626 break;
627#endif
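 /* R_SH_PCDISP is the 12 bit PC relative displacement used by bsr and
    bra: it is counted in two byte units from four bytes past the
    branch instruction. The existing displacement in the insn is added
    in below before the field is rewritten. */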
628 case R_SH_PCDISP:
629 insn = bfd_get_16 (abfd, hit_data);
630 sym_value += reloc_entry->addend;
631 sym_value -= (input_section->output_section->vma
632 + input_section->output_offset
633 + addr
634 + 4);
635 sym_value += (insn & 0xfff) << 1;
636 if (insn & 0x800)
637 sym_value -= 0x1000;
638 insn = (insn & 0xf000) | (sym_value & 0xfff);
639 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
640 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
641 return bfd_reloc_overflow;
642 break;
643 default:
644 abort ();
645 break;
646 }
647
648 return bfd_reloc_ok;
649}
650
651#define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
652
653/* We can do relaxing. */
654#define coff_bfd_relax_section sh_relax_section
655
656/* We use the special COFF backend linker. */
657#define coff_relocate_section sh_relocate_section
658
659/* When relaxing, we need to use special code to get the relocated
660 section contents. */
661#define coff_bfd_get_relocated_section_contents \
662 sh_coff_get_relocated_section_contents
663
664#include "coffcode.h"
665\f
666/* This function handles relaxing on the SH.
667
668 Function calls on the SH look like this:
669
670 movl L1,r0
671 ...
672 jsr @r0
673 ...
674 L1:
675 .long function
676
677 The compiler and assembler will cooperate to create R_SH_USES
678 relocs on the jsr instructions. The r_offset field of the
679 R_SH_USES reloc is the PC relative offset to the instruction which
680 loads the register (the r_offset field is computed as though it
681 were a jump instruction, so the offset value is actually from four
682 bytes past the instruction). The linker can use this reloc to
683 determine just which function is being called, and thus decide
684 whether it is possible to replace the jsr with a bsr.
685
686 If multiple function calls are all based on a single register load
687 (i.e., the same function is called multiple times), the compiler
688 guarantees that each function call will have an R_SH_USES reloc.
689 Therefore, if the linker is able to convert each R_SH_USES reloc
690 which refers to that address, it can safely eliminate the register
691 load.
692
693 When the assembler creates an R_SH_USES reloc, it examines it to
694 determine which address is being loaded (L1 in the above example).
695 It then counts the number of references to that address, and
696 creates an R_SH_COUNT reloc at that address. The r_offset field of
697 the R_SH_COUNT reloc will be the number of references. If the
698 linker is able to eliminate a register load, it can use the
699 R_SH_COUNT reloc to see whether it can also eliminate the function
700 address.
701
702 SH relaxing also handles another, unrelated, matter. On the SH, if
703 a load or store instruction is not aligned on a four byte boundary,
704 the memory cycle interferes with the 32 bit instruction fetch,
705 causing a one cycle bubble in the pipeline. Therefore, we try to
706 align load and store instructions on four byte boundaries if we
707 can, by swapping them with one of the adjacent instructions. */
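/* As an illustration (made up labels), a call that can be relaxed
   starts out as

       mov.l   L1,r0        ! register load found via r_offset
       jsr     @r0          ! carries the R_SH_USES reloc
       ...
   L1: .long   function     ! carries the R_SH_COUNT reloc

   and, when FUNCTION is within range of a 12 bit branch displacement,
   becomes

       bsr     function     ! R_SH_USES rewritten to R_SH_PCDISP
       ...

   with the mov.l deleted, and the .long deleted as well once the
   R_SH_COUNT use count reaches zero. */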
708
709static bfd_boolean
710sh_relax_section (abfd, sec, link_info, again)
711 bfd *abfd;
712 asection *sec;
713 struct bfd_link_info *link_info;
714 bfd_boolean *again;
715{
716 struct internal_reloc *internal_relocs;
717 bfd_boolean have_code;
718 struct internal_reloc *irel, *irelend;
719 bfd_byte *contents = NULL;
720
721 *again = FALSE;
722
723 if (link_info->relocatable
724 || (sec->flags & SEC_RELOC) == 0
725 || sec->reloc_count == 0)
726 return TRUE;
727
728 if (coff_section_data (abfd, sec) == NULL)
729 {
730 bfd_size_type amt = sizeof (struct coff_section_tdata);
731 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
732 if (sec->used_by_bfd == NULL)
733 return FALSE;
734 }
735
736 internal_relocs = (_bfd_coff_read_internal_relocs
737 (abfd, sec, link_info->keep_memory,
738 (bfd_byte *) NULL, FALSE,
739 (struct internal_reloc *) NULL));
740 if (internal_relocs == NULL)
741 goto error_return;
742
743 have_code = FALSE;
744
745 irelend = internal_relocs + sec->reloc_count;
746 for (irel = internal_relocs; irel < irelend; irel++)
747 {
748 bfd_vma laddr, paddr, symval;
749 unsigned short insn;
750 struct internal_reloc *irelfn, *irelscan, *irelcount;
751 struct internal_syment sym;
752 bfd_signed_vma foff;
753
754 if (irel->r_type == R_SH_CODE)
755 have_code = TRUE;
756
757 if (irel->r_type != R_SH_USES)
758 continue;
759
760 /* Get the section contents. */
761 if (contents == NULL)
762 {
763 if (coff_section_data (abfd, sec)->contents != NULL)
764 contents = coff_section_data (abfd, sec)->contents;
765 else
766 {
767 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
768 goto error_return;
769 }
770 }
771
772 /* The r_offset field of the R_SH_USES reloc will point us to
773 the register load. The 4 is because the r_offset field is
774 computed as though it were a jump offset, which are based
775 from 4 bytes after the jump instruction. */
776 laddr = irel->r_vaddr - sec->vma + 4;
777 /* Careful to sign extend the 32-bit offset. */
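 /* ((x & 0xffffffff) ^ 0x80000000) - 0x80000000 sign extends x from
    32 bits, which matters when bfd_vma is wider than 32 bits. */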
778 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
779 if (laddr >= sec->size)
780 {
781 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
782 abfd, (unsigned long) irel->r_vaddr);
783 continue;
784 }
785 insn = bfd_get_16 (abfd, contents + laddr);
786
787 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
788 if ((insn & 0xf000) != 0xd000)
789 {
790 ((*_bfd_error_handler)
791 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
792 abfd, (unsigned long) irel->r_vaddr, insn));
793 continue;
794 }
795
796 /* Get the address from which the register is being loaded. The
797 displacement in the mov.l instruction is quadrupled. It is a
798 displacement from four bytes after the movl instruction, but,
799 before adding in the PC address, two least significant bits
800 of the PC are cleared. We assume that the section is aligned
801 on a four byte boundary. */
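 /* For example (made up numbers): laddr == 0x106 and an insn of
    0xd003 give a displacement of 3 * 4 == 12 from a base of
    (0x106 + 4) & ~3 == 0x108, so paddr == 0x114. */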
802 paddr = insn & 0xff;
803 paddr *= 4;
804 paddr += (laddr + 4) &~ (bfd_vma) 3;
805 if (paddr >= sec->size)
806 {
807 ((*_bfd_error_handler)
808 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
809 abfd, (unsigned long) irel->r_vaddr));
810 continue;
811 }
812
813 /* Get the reloc for the address from which the register is
814 being loaded. This reloc will tell us which function is
815 actually being called. */
816 paddr += sec->vma;
817 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
818 if (irelfn->r_vaddr == paddr
819#ifdef COFF_WITH_PE
820 && (irelfn->r_type == R_SH_IMM32
821 || irelfn->r_type == R_SH_IMM32CE
822 || irelfn->r_type == R_SH_IMAGEBASE)
823
824#else
825 && irelfn->r_type == R_SH_IMM32
826#endif
827 )
828 break;
829 if (irelfn >= irelend)
830 {
831 ((*_bfd_error_handler)
832 ("%B: 0x%lx: warning: could not find expected reloc",
833 abfd, (unsigned long) paddr));
834 continue;
835 }
836
837 /* Get the value of the symbol referred to by the reloc. */
838 if (! _bfd_coff_get_external_symbols (abfd))
839 goto error_return;
840 bfd_coff_swap_sym_in (abfd,
841 ((bfd_byte *) obj_coff_external_syms (abfd)
842 + (irelfn->r_symndx
843 * bfd_coff_symesz (abfd))),
844 &sym);
845 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
846 {
847 ((*_bfd_error_handler)
848 ("%B: 0x%lx: warning: symbol in unexpected section",
849 abfd, (unsigned long) paddr));
850 continue;
851 }
852
853 if (sym.n_sclass != C_EXT)
854 {
855 symval = (sym.n_value
856 - sec->vma
857 + sec->output_section->vma
858 + sec->output_offset);
859 }
860 else
861 {
862 struct coff_link_hash_entry *h;
863
864 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
865 BFD_ASSERT (h != NULL);
866 if (h->root.type != bfd_link_hash_defined
867 && h->root.type != bfd_link_hash_defweak)
868 {
869 /* This appears to be a reference to an undefined
870 symbol. Just ignore it--it will be caught by the
871 regular reloc processing. */
872 continue;
873 }
874
875 symval = (h->root.u.def.value
876 + h->root.u.def.section->output_section->vma
877 + h->root.u.def.section->output_offset);
878 }
879
880 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
881
882 /* See if this function call can be shortened. */
883 foff = (symval
884 - (irel->r_vaddr
885 - sec->vma
886 + sec->output_section->vma
887 + sec->output_offset
888 + 4));
889 if (foff < -0x1000 || foff >= 0x1000)
890 {
891 /* After all that work, we can't shorten this function call. */
892 continue;
893 }
894
895 /* Shorten the function call. */
896
897 /* For simplicity of coding, we are going to modify the section
898 contents, the section relocs, and the BFD symbol table. We
899 must tell the rest of the code not to free up this
900 information. It would be possible to instead create a table
901 of changes which have to be made, as is done in coff-mips.c;
902 that would be more work, but would require less memory when
903 the linker is run. */
904
905 coff_section_data (abfd, sec)->relocs = internal_relocs;
906 coff_section_data (abfd, sec)->keep_relocs = TRUE;
907
908 coff_section_data (abfd, sec)->contents = contents;
909 coff_section_data (abfd, sec)->keep_contents = TRUE;
910
911 obj_coff_keep_syms (abfd) = TRUE;
912
913 /* Replace the jsr with a bsr. */
914
915 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
916 replace the jsr with a bsr. */
917 irel->r_type = R_SH_PCDISP;
918 irel->r_symndx = irelfn->r_symndx;
919 if (sym.n_sclass != C_EXT)
920 {
921 /* If this needs to be changed because of future relaxing,
922 it will be handled here like other internal PCDISP
923 relocs. */
924 bfd_put_16 (abfd,
925 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
926 contents + irel->r_vaddr - sec->vma);
927 }
928 else
929 {
930 /* We can't fully resolve this yet, because the external
931 symbol value may be changed by future relaxing. We let
932 the final link phase handle it. */
933 bfd_put_16 (abfd, (bfd_vma) 0xb000,
934 contents + irel->r_vaddr - sec->vma);
935 }
936
937 /* See if there is another R_SH_USES reloc referring to the same
938 register load. */
939 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
940 if (irelscan->r_type == R_SH_USES
941 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
942 break;
943 if (irelscan < irelend)
944 {
945 /* Some other function call depends upon this register load,
946 and we have not yet converted that function call.
947 Indeed, we may never be able to convert it. There is
948 nothing else we can do at this point. */
949 continue;
950 }
951
952 /* Look for a R_SH_COUNT reloc on the location where the
953 function address is stored. Do this before deleting any
954 bytes, to avoid confusion about the address. */
955 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
956 if (irelcount->r_vaddr == paddr
957 && irelcount->r_type == R_SH_COUNT)
958 break;
959
960 /* Delete the register load. */
961 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
962 goto error_return;
963
964 /* That will change things, so, just in case it permits some
965 other function call to come within range, we should relax
966 again. Note that this is not required, and it may be slow. */
967 *again = TRUE;
968
969 /* Now check whether we got a COUNT reloc. */
970 if (irelcount >= irelend)
971 {
972 ((*_bfd_error_handler)
973 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
974 abfd, (unsigned long) paddr));
975 continue;
976 }
977
978 /* The number of uses is stored in the r_offset field. We've
979 just deleted one. */
980 if (irelcount->r_offset == 0)
981 {
982 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
983 abfd, (unsigned long) paddr));
984 continue;
985 }
986
987 --irelcount->r_offset;
988
989 /* If there are no more uses, we can delete the address. Reload
990 the address from irelfn, in case it was changed by the
991 previous call to sh_relax_delete_bytes. */
992 if (irelcount->r_offset == 0)
993 {
994 if (! sh_relax_delete_bytes (abfd, sec,
995 irelfn->r_vaddr - sec->vma, 4))
996 goto error_return;
997 }
998
999 /* We've done all we can with that function call. */
1000 }
1001
1002 /* Look for load and store instructions that we can align on four
1003 byte boundaries. */
1004 if (have_code)
1005 {
1006 bfd_boolean swapped;
1007
1008 /* Get the section contents. */
1009 if (contents == NULL)
1010 {
1011 if (coff_section_data (abfd, sec)->contents != NULL)
1012 contents = coff_section_data (abfd, sec)->contents;
1013 else
1014 {
1015 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1016 goto error_return;
1017 }
1018 }
1019
1020 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1021 goto error_return;
1022
1023 if (swapped)
1024 {
1025 coff_section_data (abfd, sec)->relocs = internal_relocs;
1026 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1027
1028 coff_section_data (abfd, sec)->contents = contents;
1029 coff_section_data (abfd, sec)->keep_contents = TRUE;
1030
1031 obj_coff_keep_syms (abfd) = TRUE;
1032 }
1033 }
1034
1035 if (internal_relocs != NULL
1036 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1037 {
1038 if (! link_info->keep_memory)
1039 free (internal_relocs);
1040 else
1041 coff_section_data (abfd, sec)->relocs = internal_relocs;
1042 }
1043
1044 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1045 {
1046 if (! link_info->keep_memory)
1047 free (contents);
1048 else
1049 /* Cache the section contents for coff_link_input_bfd. */
1050 coff_section_data (abfd, sec)->contents = contents;
1051 }
1052
1053 return TRUE;
1054
1055 error_return:
1056 if (internal_relocs != NULL
1057 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1058 free (internal_relocs);
1059 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1060 free (contents);
1061 return FALSE;
1062}
1063
1064/* Delete some bytes from a section while relaxing. */
1065
1066static bfd_boolean
1067sh_relax_delete_bytes (abfd, sec, addr, count)
1068 bfd *abfd;
1069 asection *sec;
1070 bfd_vma addr;
1071 int count;
1072{
1073 bfd_byte *contents;
1074 struct internal_reloc *irel, *irelend;
1075 struct internal_reloc *irelalign;
1076 bfd_vma toaddr;
1077 bfd_byte *esym, *esymend;
1078 bfd_size_type symesz;
1079 struct coff_link_hash_entry **sym_hash;
1080 asection *o;
1081
1082 contents = coff_section_data (abfd, sec)->contents;
1083
/* The deletion must stop at the next ALIGN reloc for an alignment
   power larger than the number of bytes we are deleting. */
1086
1087 irelalign = NULL;
1088 toaddr = sec->size;
1089
1090 irel = coff_section_data (abfd, sec)->relocs;
1091 irelend = irel + sec->reloc_count;
1092 for (; irel < irelend; irel++)
1093 {
1094 if (irel->r_type == R_SH_ALIGN
1095 && irel->r_vaddr - sec->vma > addr
1096 && count < (1 << irel->r_offset))
1097 {
1098 irelalign = irel;
1099 toaddr = irel->r_vaddr - sec->vma;
1100 break;
1101 }
1102 }
1103
1104 /* Actually delete the bytes. */
1105 memmove (contents + addr, contents + addr + count,
1106 (size_t) (toaddr - addr - count));
1107 if (irelalign == NULL)
1108 sec->size -= count;
1109 else
1110 {
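 /* The deletion stopped at an ALIGN reloc, so the section cannot
    shrink; instead, fill the vacated bytes just below the alignment
    boundary with NOPs. */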
1111 int i;
1112
1113#define NOP_OPCODE (0x0009)
1114
1115 BFD_ASSERT ((count & 1) == 0);
1116 for (i = 0; i < count; i += 2)
1117 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1118 }
1119
1120 /* Adjust all the relocs. */
1121 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1122 {
1123 bfd_vma nraddr, stop;
1124 bfd_vma start = 0;
1125 int insn = 0;
1126 struct internal_syment sym;
1127 int off, adjust, oinsn;
1128 bfd_signed_vma voff = 0;
1129 bfd_boolean overflow;
1130
1131 /* Get the new reloc address. */
1132 nraddr = irel->r_vaddr - sec->vma;
1133 if ((irel->r_vaddr - sec->vma > addr
1134 && irel->r_vaddr - sec->vma < toaddr)
1135 || (irel->r_type == R_SH_ALIGN
1136 && irel->r_vaddr - sec->vma == toaddr))
1137 nraddr -= count;
1138
1139 /* See if this reloc was for the bytes we have deleted, in which
1140 case we no longer care about it. Don't delete relocs which
1141 represent addresses, though. */
1142 if (irel->r_vaddr - sec->vma >= addr
1143 && irel->r_vaddr - sec->vma < addr + count
1144 && irel->r_type != R_SH_ALIGN
1145 && irel->r_type != R_SH_CODE
1146 && irel->r_type != R_SH_DATA
1147 && irel->r_type != R_SH_LABEL)
1148 irel->r_type = R_SH_UNUSED;
1149
1150 /* If this is a PC relative reloc, see if the range it covers
1151 includes the bytes we have deleted. */
1152 switch (irel->r_type)
1153 {
1154 default:
1155 break;
1156
1157 case R_SH_PCDISP8BY2:
1158 case R_SH_PCDISP:
1159 case R_SH_PCRELIMM8BY2:
1160 case R_SH_PCRELIMM8BY4:
1161 start = irel->r_vaddr - sec->vma;
1162 insn = bfd_get_16 (abfd, contents + nraddr);
1163 break;
1164 }
1165
1166 switch (irel->r_type)
1167 {
1168 default:
1169 start = stop = addr;
1170 break;
1171
1172 case R_SH_IMM32:
1173#ifdef COFF_WITH_PE
1174 case R_SH_IMM32CE:
1175 case R_SH_IMAGEBASE:
1176#endif
1177 /* If this reloc is against a symbol defined in this
1178 section, and the symbol will not be adjusted below, we
must check the addend to see whether it will put the value in
1180 range to be adjusted, and hence must be changed. */
1181 bfd_coff_swap_sym_in (abfd,
1182 ((bfd_byte *) obj_coff_external_syms (abfd)
1183 + (irel->r_symndx
1184 * bfd_coff_symesz (abfd))),
1185 &sym);
1186 if (sym.n_sclass != C_EXT
1187 && sym.n_scnum == sec->target_index
1188 && ((bfd_vma) sym.n_value <= addr
1189 || (bfd_vma) sym.n_value >= toaddr))
1190 {
1191 bfd_vma val;
1192
1193 val = bfd_get_32 (abfd, contents + nraddr);
1194 val += sym.n_value;
1195 if (val > addr && val < toaddr)
1196 bfd_put_32 (abfd, val - count, contents + nraddr);
1197 }
1198 start = stop = addr;
1199 break;
1200
1201 case R_SH_PCDISP8BY2:
1202 off = insn & 0xff;
1203 if (off & 0x80)
1204 off -= 0x100;
1205 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1206 break;
1207
1208 case R_SH_PCDISP:
1209 bfd_coff_swap_sym_in (abfd,
1210 ((bfd_byte *) obj_coff_external_syms (abfd)
1211 + (irel->r_symndx
1212 * bfd_coff_symesz (abfd))),
1213 &sym);
1214 if (sym.n_sclass == C_EXT)
1215 start = stop = addr;
1216 else
1217 {
1218 off = insn & 0xfff;
1219 if (off & 0x800)
1220 off -= 0x1000;
1221 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1222 }
1223 break;
1224
1225 case R_SH_PCRELIMM8BY2:
1226 off = insn & 0xff;
1227 stop = start + 4 + off * 2;
1228 break;
1229
1230 case R_SH_PCRELIMM8BY4:
1231 off = insn & 0xff;
1232 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1233 break;
1234
1235 case R_SH_SWITCH8:
1236 case R_SH_SWITCH16:
1237 case R_SH_SWITCH32:
These reloc types represent
1239 .word L2-L1
1240 The r_offset field holds the difference between the reloc
1241 address and L1. That is the start of the reloc, and
1242 adding in the contents gives us the top. We must adjust
1243 both the r_offset field and the section contents. */
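 /* A sketch with made up addresses: a .word at 0x120 whose L1 is at
    0x100 has r_offset == 0x20, so L1 == 0x120 - 0x20; stored
    contents of 0x40 then place L2 at 0x140. */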
1244
1245 start = irel->r_vaddr - sec->vma;
1246 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1247
1248 if (start > addr
1249 && start < toaddr
1250 && (stop <= addr || stop >= toaddr))
1251 irel->r_offset += count;
1252 else if (stop > addr
1253 && stop < toaddr
1254 && (start <= addr || start >= toaddr))
1255 irel->r_offset -= count;
1256
1257 start = stop;
1258
1259 if (irel->r_type == R_SH_SWITCH16)
1260 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1261 else if (irel->r_type == R_SH_SWITCH8)
1262 voff = bfd_get_8 (abfd, contents + nraddr);
1263 else
1264 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1265 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1266
1267 break;
1268
1269 case R_SH_USES:
1270 start = irel->r_vaddr - sec->vma;
1271 stop = (bfd_vma) ((bfd_signed_vma) start
1272 + (long) irel->r_offset
1273 + 4);
1274 break;
1275 }
1276
1277 if (start > addr
1278 && start < toaddr
1279 && (stop <= addr || stop >= toaddr))
1280 adjust = count;
1281 else if (stop > addr
1282 && stop < toaddr
1283 && (start <= addr || start >= toaddr))
1284 adjust = - count;
1285 else
1286 adjust = 0;
1287
1288 if (adjust != 0)
1289 {
1290 oinsn = insn;
1291 overflow = FALSE;
1292 switch (irel->r_type)
1293 {
1294 default:
1295 abort ();
1296 break;
1297
1298 case R_SH_PCDISP8BY2:
1299 case R_SH_PCRELIMM8BY2:
1300 insn += adjust / 2;
1301 if ((oinsn & 0xff00) != (insn & 0xff00))
1302 overflow = TRUE;
1303 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1304 break;
1305
1306 case R_SH_PCDISP:
1307 insn += adjust / 2;
1308 if ((oinsn & 0xf000) != (insn & 0xf000))
1309 overflow = TRUE;
1310 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1311 break;
1312
1313 case R_SH_PCRELIMM8BY4:
1314 BFD_ASSERT (adjust == count || count >= 4);
1315 if (count >= 4)
1316 insn += adjust / 4;
1317 else
1318 {
1319 if ((irel->r_vaddr & 3) == 0)
1320 ++insn;
1321 }
1322 if ((oinsn & 0xff00) != (insn & 0xff00))
1323 overflow = TRUE;
1324 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1325 break;
1326
1327 case R_SH_SWITCH8:
1328 voff += adjust;
1329 if (voff < 0 || voff >= 0xff)
1330 overflow = TRUE;
1331 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1332 break;
1333
1334 case R_SH_SWITCH16:
1335 voff += adjust;
1336 if (voff < - 0x8000 || voff >= 0x8000)
1337 overflow = TRUE;
1338 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1339 break;
1340
1341 case R_SH_SWITCH32:
1342 voff += adjust;
1343 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1344 break;
1345
1346 case R_SH_USES:
1347 irel->r_offset += adjust;
1348 break;
1349 }
1350
1351 if (overflow)
1352 {
1353 ((*_bfd_error_handler)
1354 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1355 abfd, (unsigned long) irel->r_vaddr));
1356 bfd_set_error (bfd_error_bad_value);
1357 return FALSE;
1358 }
1359 }
1360
1361 irel->r_vaddr = nraddr + sec->vma;
1362 }
1363
/* Look through all the other sections. If they contain any IMM32
1365 relocs against internal symbols which we are not going to adjust
1366 below, we may need to adjust the addends. */
1367 for (o = abfd->sections; o != NULL; o = o->next)
1368 {
1369 struct internal_reloc *internal_relocs;
1370 struct internal_reloc *irelscan, *irelscanend;
1371 bfd_byte *ocontents;
1372
1373 if (o == sec
1374 || (o->flags & SEC_RELOC) == 0
1375 || o->reloc_count == 0)
1376 continue;
1377
1378 /* We always cache the relocs. Perhaps, if info->keep_memory is
1379 FALSE, we should free them, if we are permitted to, when we
1380 leave sh_coff_relax_section. */
1381 internal_relocs = (_bfd_coff_read_internal_relocs
1382 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1383 (struct internal_reloc *) NULL));
1384 if (internal_relocs == NULL)
1385 return FALSE;
1386
1387 ocontents = NULL;
1388 irelscanend = internal_relocs + o->reloc_count;
1389 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1390 {
1391 struct internal_syment sym;
1392
1393#ifdef COFF_WITH_PE
1394 if (irelscan->r_type != R_SH_IMM32
1395 && irelscan->r_type != R_SH_IMAGEBASE
1396 && irelscan->r_type != R_SH_IMM32CE)
1397#else
1398 if (irelscan->r_type != R_SH_IMM32)
1399#endif
1400 continue;
1401
1402 bfd_coff_swap_sym_in (abfd,
1403 ((bfd_byte *) obj_coff_external_syms (abfd)
1404 + (irelscan->r_symndx
1405 * bfd_coff_symesz (abfd))),
1406 &sym);
1407 if (sym.n_sclass != C_EXT
1408 && sym.n_scnum == sec->target_index
1409 && ((bfd_vma) sym.n_value <= addr
1410 || (bfd_vma) sym.n_value >= toaddr))
1411 {
1412 bfd_vma val;
1413
1414 if (ocontents == NULL)
1415 {
1416 if (coff_section_data (abfd, o)->contents != NULL)
1417 ocontents = coff_section_data (abfd, o)->contents;
1418 else
1419 {
1420 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1421 return FALSE;
1422 /* We always cache the section contents.
1423 Perhaps, if info->keep_memory is FALSE, we
1424 should free them, if we are permitted to,
1425 when we leave sh_coff_relax_section. */
1426 coff_section_data (abfd, o)->contents = ocontents;
1427 }
1428 }
1429
1430 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1431 val += sym.n_value;
1432 if (val > addr && val < toaddr)
1433 bfd_put_32 (abfd, val - count,
1434 ocontents + irelscan->r_vaddr - o->vma);
1435
1436 coff_section_data (abfd, o)->keep_contents = TRUE;
1437 }
1438 }
1439 }
1440
1441 /* Adjusting the internal symbols will not work if something has
1442 already retrieved the generic symbols. It would be possible to
1443 make this work by adjusting the generic symbols at the same time.
1444 However, this case should not arise in normal usage. */
1445 if (obj_symbols (abfd) != NULL
1446 || obj_raw_syments (abfd) != NULL)
1447 {
1448 ((*_bfd_error_handler)
1449 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1450 bfd_set_error (bfd_error_invalid_operation);
1451 return FALSE;
1452 }
1453
1454 /* Adjust all the symbols. */
1455 sym_hash = obj_coff_sym_hashes (abfd);
1456 symesz = bfd_coff_symesz (abfd);
1457 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1458 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1459 while (esym < esymend)
1460 {
1461 struct internal_syment isym;
1462
1463 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1464
1465 if (isym.n_scnum == sec->target_index
1466 && (bfd_vma) isym.n_value > addr
1467 && (bfd_vma) isym.n_value < toaddr)
1468 {
1469 isym.n_value -= count;
1470
1471 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1472
1473 if (*sym_hash != NULL)
1474 {
1475 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1476 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1477 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1478 && (*sym_hash)->root.u.def.value < toaddr);
1479 (*sym_hash)->root.u.def.value -= count;
1480 }
1481 }
1482
1483 esym += (isym.n_numaux + 1) * symesz;
1484 sym_hash += isym.n_numaux + 1;
1485 }
1486
1487 /* See if we can move the ALIGN reloc forward. We have adjusted
1488 r_vaddr for it already. */
1489 if (irelalign != NULL)
1490 {
1491 bfd_vma alignto, alignaddr;
1492
1493 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1494 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1495 1 << irelalign->r_offset);
1496 if (alignto != alignaddr)
1497 {
1498 /* Tail recursion. */
1499 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1500 (int) (alignto - alignaddr));
1501 }
1502 }
1503
1504 return TRUE;
1505}
1506\f
1507/* This is yet another version of the SH opcode table, used to rapidly
1508 get information about a particular instruction. */
1509
1510/* The opcode map is represented by an array of these structures. The
1511 array is indexed by the high order four bits in the instruction. */
1512
1513struct sh_major_opcode
1514{
1515 /* A pointer to the instruction list. This is an array which
1516 contains all the instructions with this major opcode. */
1517 const struct sh_minor_opcode *minor_opcodes;
1518 /* The number of elements in minor_opcodes. */
1519 unsigned short count;
1520};
1521
1522/* This structure holds information for a set of SH opcodes. The
1523 instruction code is anded with the mask value, and the resulting
value is used to search the sorted opcode list. */
1525
1526struct sh_minor_opcode
1527{
1528 /* The sorted opcode list. */
1529 const struct sh_opcode *opcodes;
1530 /* The number of elements in opcodes. */
1531 unsigned short count;
1532 /* The mask value to use when searching the opcode list. */
1533 unsigned short mask;
1534};
1535
1536/* This structure holds information for an SH instruction. An array
1537 of these structures is sorted in order by opcode. */
1538
1539struct sh_opcode
1540{
1541 /* The code for this instruction, after it has been anded with the
mask value in the sh_minor_opcode structure. */
1543 unsigned short opcode;
1544 /* Flags for this instruction. */
1545 unsigned long flags;
1546};
1547
/* Flags which appear in the sh_opcode structure. */
1549
1550/* This instruction loads a value from memory. */
1551#define LOAD (0x1)
1552
1553/* This instruction stores a value to memory. */
1554#define STORE (0x2)
1555
1556/* This instruction is a branch. */
1557#define BRANCH (0x4)
1558
1559/* This instruction has a delay slot. */
1560#define DELAY (0x8)
1561
1562/* This instruction uses the value in the register in the field at
1563 mask 0x0f00 of the instruction. */
1564#define USES1 (0x10)
1565#define USES1_REG(x) ((x & 0x0f00) >> 8)
1566
1567/* This instruction uses the value in the register in the field at
1568 mask 0x00f0 of the instruction. */
1569#define USES2 (0x20)
1570#define USES2_REG(x) ((x & 0x00f0) >> 4)
1571
1572/* This instruction uses the value in register 0. */
1573#define USESR0 (0x40)
1574
1575/* This instruction sets the value in the register in the field at
1576 mask 0x0f00 of the instruction. */
1577#define SETS1 (0x80)
1578#define SETS1_REG(x) ((x & 0x0f00) >> 8)
1579
1580/* This instruction sets the value in the register in the field at
1581 mask 0x00f0 of the instruction. */
1582#define SETS2 (0x100)
1583#define SETS2_REG(x) ((x & 0x00f0) >> 4)
1584
1585/* This instruction sets register 0. */
1586#define SETSR0 (0x200)
1587
1588/* This instruction sets a special register. */
1589#define SETSSP (0x400)
1590
1591/* This instruction uses a special register. */
1592#define USESSP (0x800)
1593
1594/* This instruction uses the floating point register in the field at
1595 mask 0x0f00 of the instruction. */
1596#define USESF1 (0x1000)
1597#define USESF1_REG(x) ((x & 0x0f00) >> 8)
1598
1599/* This instruction uses the floating point register in the field at
1600 mask 0x00f0 of the instruction. */
1601#define USESF2 (0x2000)
1602#define USESF2_REG(x) ((x & 0x00f0) >> 4)
1603
1604/* This instruction uses floating point register 0. */
1605#define USESF0 (0x4000)
1606
1607/* This instruction sets the floating point register in the field at
1608 mask 0x0f00 of the instruction. */
1609#define SETSF1 (0x8000)
1610#define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1611
1612#define USESAS (0x10000)
1613#define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1614#define USESR8 (0x20000)
1615#define SETSAS (0x40000)
1616#define SETSAS_REG(x) USESAS_REG (x)
1617
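/* As a worked example against the tables below: insn 0x600b (neg
   rm,rn) has major opcode 6, so sh_opcode6 is searched; its entry
   masks with 0xf00f, and 0x600b & 0xf00f == 0x600b matches the
   sh_opcode60 entry flagged SETS1 | USES2, i.e. the insn reads the
   register in bits 4-7 and writes the register in bits 8-11. */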
1618#define MAP(a) a, sizeof a / sizeof a[0]
1619
1620#ifndef COFF_IMAGE_WITH_PE
1621static bfd_boolean sh_insn_uses_reg
1622 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1623static bfd_boolean sh_insn_sets_reg
1624 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1625static bfd_boolean sh_insn_uses_or_sets_reg
1626 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1627static bfd_boolean sh_insn_uses_freg
1628 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1629static bfd_boolean sh_insn_sets_freg
1630 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1631static bfd_boolean sh_insn_uses_or_sets_freg
1632 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1633static bfd_boolean sh_insns_conflict
1634 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1635 const struct sh_opcode *));
1636static bfd_boolean sh_load_use
1637 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1638 const struct sh_opcode *));
1639
1640/* The opcode maps. */
1641
1642static const struct sh_opcode sh_opcode00[] =
1643{
1644 { 0x0008, SETSSP }, /* clrt */
1645 { 0x0009, 0 }, /* nop */
1646 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1647 { 0x0018, SETSSP }, /* sett */
1648 { 0x0019, SETSSP }, /* div0u */
1649 { 0x001b, 0 }, /* sleep */
1650 { 0x0028, SETSSP }, /* clrmac */
1651 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1652 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1653 { 0x0048, SETSSP }, /* clrs */
1654 { 0x0058, SETSSP } /* sets */
1655};
1656
1657static const struct sh_opcode sh_opcode01[] =
1658{
1659 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1660 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1661 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1662 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1663 { 0x0029, SETS1 | USESSP }, /* movt rn */
1664 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1665 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1666 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1667 { 0x0083, LOAD | USES1 }, /* pref @rn */
1668 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1669 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1670 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1671 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1672 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1673};
1674
1675static const struct sh_opcode sh_opcode02[] =
1676{
1677 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1678 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1679 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1680 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1681 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1682 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1683 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1684 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1685 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1686};
1687
1688static const struct sh_minor_opcode sh_opcode0[] =
1689{
1690 { MAP (sh_opcode00), 0xffff },
1691 { MAP (sh_opcode01), 0xf0ff },
1692 { MAP (sh_opcode02), 0xf00f }
1693};
1694
1695static const struct sh_opcode sh_opcode10[] =
1696{
1697 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1698};
1699
1700static const struct sh_minor_opcode sh_opcode1[] =
1701{
1702 { MAP (sh_opcode10), 0xf000 }
1703};
1704
1705static const struct sh_opcode sh_opcode20[] =
1706{
1707 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1708 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1709 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1710 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1711 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1712 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1713 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1714 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1715 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1716 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1717 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1718 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1719 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1720 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1721 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1722};
1723
1724static const struct sh_minor_opcode sh_opcode2[] =
1725{
1726 { MAP (sh_opcode20), 0xf00f }
1727};
1728
1729static const struct sh_opcode sh_opcode30[] =
1730{
1731 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1732 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1733 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1734 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1735 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1736 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1737 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1738 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1739 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1740 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1741 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1742 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1743 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1744 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1745};
1746
1747static const struct sh_minor_opcode sh_opcode3[] =
1748{
1749 { MAP (sh_opcode30), 0xf00f }
1750};
1751
1752static const struct sh_opcode sh_opcode40[] =
1753{
1754 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1755 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1756 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1757 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1758 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1759 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1760 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1761 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1762 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1763 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1764 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1765 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1766 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1767 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1768 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1769 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1770 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1771 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1772 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1773 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1774 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1775 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1776 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1777 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1778 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1779 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1780 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1781 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1782 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1783 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1784 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1785 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1786  { 0x405a, SETSSP | USES1 },			/* lds rm,fpul */
1787 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1788 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1789 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1790 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1791 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1792  { 0x407a, SETSSP | USES1 },			/* lds rm,a0 */
1793 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1794 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1795  { 0x408a, SETSSP | USES1 },			/* lds rm,x0 */
1796 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1797 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1798  { 0x409a, SETSSP | USES1 },			/* lds rm,x1 */
1799 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1800 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1801  { 0x40aa, SETSSP | USES1 },			/* lds rm,y0 */
1802 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1803 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1804  { 0x40ba, SETSSP | USES1 }			/* lds rm,y1 */
1805};
1806
1807static const struct sh_opcode sh_opcode41[] =
1808{
1809 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1810 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1811 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1812 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1813 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1814 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1815};
1816
1817static const struct sh_minor_opcode sh_opcode4[] =
1818{
1819 { MAP (sh_opcode40), 0xf0ff },
1820 { MAP (sh_opcode41), 0xf00f }
1821};
1822
1823static const struct sh_opcode sh_opcode50[] =
1824{
1825 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1826};
1827
1828static const struct sh_minor_opcode sh_opcode5[] =
1829{
1830 { MAP (sh_opcode50), 0xf000 }
1831};
1832
1833static const struct sh_opcode sh_opcode60[] =
1834{
1835 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1836 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1837 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1838 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1839 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1840 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1841 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1842 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1843 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1844 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1845 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1846 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1847 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1848 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1849 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1850 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1851};
1852
1853static const struct sh_minor_opcode sh_opcode6[] =
1854{
1855 { MAP (sh_opcode60), 0xf00f }
1856};
1857
1858static const struct sh_opcode sh_opcode70[] =
1859{
1860 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1861};
1862
1863static const struct sh_minor_opcode sh_opcode7[] =
1864{
1865 { MAP (sh_opcode70), 0xf000 }
1866};
1867
1868static const struct sh_opcode sh_opcode80[] =
1869{
1870 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1871 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1872 { 0x8200, SETSSP }, /* setrc #imm */
1873 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1874  { 0x8500, LOAD | SETSR0 | USES2 },		/* mov.w @(disp,rm),r0 */
1875 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1876 { 0x8900, BRANCH | USESSP }, /* bt label */
1877 { 0x8b00, BRANCH | USESSP }, /* bf label */
1878 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1879 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1880 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1881 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1882};
1883
1884static const struct sh_minor_opcode sh_opcode8[] =
1885{
1886 { MAP (sh_opcode80), 0xff00 }
1887};
1888
1889static const struct sh_opcode sh_opcode90[] =
1890{
1891 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1892};
1893
1894static const struct sh_minor_opcode sh_opcode9[] =
1895{
1896 { MAP (sh_opcode90), 0xf000 }
1897};
1898
1899static const struct sh_opcode sh_opcodea0[] =
1900{
1901 { 0xa000, BRANCH | DELAY } /* bra label */
1902};
1903
1904static const struct sh_minor_opcode sh_opcodea[] =
1905{
1906 { MAP (sh_opcodea0), 0xf000 }
1907};
1908
1909static const struct sh_opcode sh_opcodeb0[] =
1910{
1911 { 0xb000, BRANCH | DELAY } /* bsr label */
1912};
1913
1914static const struct sh_minor_opcode sh_opcodeb[] =
1915{
1916 { MAP (sh_opcodeb0), 0xf000 }
1917};
1918
1919static const struct sh_opcode sh_opcodec0[] =
1920{
1921 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1922 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1923 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1924 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1925 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1926 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1927 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1928 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1929 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1930 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1931 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1932 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1933 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1934 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1935 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1936 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1937};
1938
1939static const struct sh_minor_opcode sh_opcodec[] =
1940{
1941 { MAP (sh_opcodec0), 0xff00 }
1942};
1943
1944static const struct sh_opcode sh_opcoded0[] =
1945{
1946 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1947};
1948
1949static const struct sh_minor_opcode sh_opcoded[] =
1950{
1951 { MAP (sh_opcoded0), 0xf000 }
1952};
1953
1954static const struct sh_opcode sh_opcodee0[] =
1955{
1956 { 0xe000, SETS1 } /* mov #imm,rn */
1957};
1958
1959static const struct sh_minor_opcode sh_opcodee[] =
1960{
1961 { MAP (sh_opcodee0), 0xf000 }
1962};
1963
1964static const struct sh_opcode sh_opcodef0[] =
1965{
1966 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1967 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1968 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1969 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1970 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1971 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1972 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1973 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1974 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1975 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1976 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1977 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1978 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1979 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1980};
1981
1982static const struct sh_opcode sh_opcodef1[] =
1983{
1984 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1985 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1986 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1987 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1988 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1989 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
1990 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
1991 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
1992 { 0xf08d, SETSF1 }, /* fldi0 fn */
1993 { 0xf09d, SETSF1 } /* fldi1 fn */
1994};
1995
1996static const struct sh_minor_opcode sh_opcodef[] =
1997{
1998 { MAP (sh_opcodef0), 0xf00f },
1999 { MAP (sh_opcodef1), 0xf0ff }
2000};
2001
2002static struct sh_major_opcode sh_opcodes[] =
2003{
2004 { MAP (sh_opcode0) },
2005 { MAP (sh_opcode1) },
2006 { MAP (sh_opcode2) },
2007 { MAP (sh_opcode3) },
2008 { MAP (sh_opcode4) },
2009 { MAP (sh_opcode5) },
2010 { MAP (sh_opcode6) },
2011 { MAP (sh_opcode7) },
2012 { MAP (sh_opcode8) },
2013 { MAP (sh_opcode9) },
2014 { MAP (sh_opcodea) },
2015 { MAP (sh_opcodeb) },
2016 { MAP (sh_opcodec) },
2017 { MAP (sh_opcoded) },
2018 { MAP (sh_opcodee) },
2019 { MAP (sh_opcodef) }
2020};
2021
2022/* The double data transfer / parallel processing insns are not
2023 described here. This will cause sh_align_load_span to leave them alone. */
2024
2025static const struct sh_opcode sh_dsp_opcodef0[] =
2026{
2027 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2028 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2029 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2030 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2031 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2032 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2033 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2034 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2035};
2036
2037static const struct sh_minor_opcode sh_dsp_opcodef[] =
2038{
2039 { MAP (sh_dsp_opcodef0), 0xfc0d }
2040};
2041
2042/* Given an instruction, return a pointer to the corresponding
2043 sh_opcode structure. Return NULL if the instruction is not
2044 recognized. */
2045
2046static const struct sh_opcode *
2047sh_insn_info (insn)
2048 unsigned int insn;
2049{
2050 const struct sh_major_opcode *maj;
2051 const struct sh_minor_opcode *min, *minend;
2052
2053 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2054 min = maj->minor_opcodes;
2055 minend = min + maj->count;
2056 for (; min < minend; min++)
2057 {
2058 unsigned int l;
2059 const struct sh_opcode *op, *opend;
2060
2061 l = insn & min->mask;
2062 op = min->opcodes;
2063 opend = op + min->count;
2064
2065      /* Since the opcode tables are sorted, we could use a binary
2066         search here if the count were above some cutoff value.  */
2067 for (; op < opend; op++)
2068 if (op->opcode == l)
2069 return op;
2070 }
2071
2072 return NULL;
2073}
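
/* Editor's illustrative sketch, kept out of the build with #if 0: a
   minimal example of how the tables above are consulted.  The insn
   word 0x321c ("add r1,r2") and the flag tests are illustration only,
   not part of the original code.  */
#if 0
static void
sh_insn_info_example (void)
{
  /* sh_opcodes[3] selects sh_opcode3; its 0xf00f mask reduces 0x321c
     to 0x300c, the "add rm,rn" entry, for any register pair.  */
  const struct sh_opcode *op = sh_insn_info (0x321c);

  if (op != NULL
      && (op->flags & SETS1) != 0
      && (op->flags & (USES1 | USES2)) != 0)
    {
      /* The insn sets rn and reads both rm and rn.  */
    }
}
#endif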
2074
2075/* See whether an instruction uses or sets a general purpose register.  */
2076
2077static bfd_boolean
2078sh_insn_uses_or_sets_reg (insn, op, reg)
2079 unsigned int insn;
2080 const struct sh_opcode *op;
2081 unsigned int reg;
2082{
2083 if (sh_insn_uses_reg (insn, op, reg))
2084 return TRUE;
2085
2086 return sh_insn_sets_reg (insn, op, reg);
2087}
2088
2089/* See whether an instruction uses a general purpose register. */
2090
2091static bfd_boolean
2092sh_insn_uses_reg (insn, op, reg)
2093 unsigned int insn;
2094 const struct sh_opcode *op;
2095 unsigned int reg;
2096{
2097 unsigned int f;
2098
2099 f = op->flags;
2100
2101 if ((f & USES1) != 0
2102 && USES1_REG (insn) == reg)
2103 return TRUE;
2104 if ((f & USES2) != 0
2105 && USES2_REG (insn) == reg)
2106 return TRUE;
2107 if ((f & USESR0) != 0
2108 && reg == 0)
2109 return TRUE;
2110 if ((f & USESAS) && reg == USESAS_REG (insn))
2111 return TRUE;
2112 if ((f & USESR8) && reg == 8)
2113 return TRUE;
2114
2115 return FALSE;
2116}
2117
2118/* See whether an instruction sets a general purpose register. */
2119
2120static bfd_boolean
2121sh_insn_sets_reg (insn, op, reg)
2122 unsigned int insn;
2123 const struct sh_opcode *op;
2124 unsigned int reg;
2125{
2126 unsigned int f;
2127
2128 f = op->flags;
2129
2130 if ((f & SETS1) != 0
2131 && SETS1_REG (insn) == reg)
2132 return TRUE;
2133 if ((f & SETS2) != 0
2134 && SETS2_REG (insn) == reg)
2135 return TRUE;
2136 if ((f & SETSR0) != 0
2137 && reg == 0)
2138 return TRUE;
2139 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2140 return TRUE;
2141
2142 return FALSE;
2143}
2144
2145/* See whether an instruction uses or sets a floating point register.  */
2146
2147static bfd_boolean
2148sh_insn_uses_or_sets_freg (insn, op, reg)
2149 unsigned int insn;
2150 const struct sh_opcode *op;
2151 unsigned int reg;
2152{
2153 if (sh_insn_uses_freg (insn, op, reg))
2154 return TRUE;
2155
2156 return sh_insn_sets_freg (insn, op, reg);
2157}
2158
2159/* See whether an instruction uses a floating point register. */
2160
2161static bfd_boolean
2162sh_insn_uses_freg (insn, op, freg)
2163 unsigned int insn;
2164 const struct sh_opcode *op;
2165 unsigned int freg;
2166{
2167 unsigned int f;
2168
2169 f = op->flags;
2170
2171  /* We can't tell whether this is a double-precision insn, so play it
2172     safe and assume that it might be.  That means we must not only
2173     test FREG against itself, but also test an even FREG against
2174     FREG+1 (in case the using insn touches only the low part of a
2175     double-precision value) and an odd FREG against FREG-1 (in case
2176     the setting insn sets only the low part of a double-precision
2177     value).  What this all boils down to is that we have to ignore
2178     the lowest bit of the register number.  */
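  /* Editor's note, for example: FR4 and FR5 together form the double
     register DR4, so a query about register 5 must also match an insn
     that encodes register 4, and (4 & 0xe) == (5 & 0xe) achieves that.  */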
2179
2180 if ((f & USESF1) != 0
2181 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2182 return TRUE;
2183 if ((f & USESF2) != 0
2184 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2185 return TRUE;
2186 if ((f & USESF0) != 0
2187 && freg == 0)
2188 return TRUE;
2189
2190 return FALSE;
2191}
2192
2193/* See whether an instruction sets a floating point register. */
2194
2195static bfd_boolean
2196sh_insn_sets_freg (insn, op, freg)
2197 unsigned int insn;
2198 const struct sh_opcode *op;
2199 unsigned int freg;
2200{
2201 unsigned int f;
2202
2203 f = op->flags;
2204
2205  /* We can't tell whether this is a double-precision insn, so play it
2206     safe and assume that it might be.  That means we must not only
2207     test FREG against itself, but also test an even FREG against
2208     FREG+1 (in case the using insn touches only the low part of a
2209     double-precision value) and an odd FREG against FREG-1 (in case
2210     the setting insn sets only the low part of a double-precision
2211     value).  What this all boils down to is that we have to ignore
2212     the lowest bit of the register number.  */
2213
2214 if ((f & SETSF1) != 0
2215 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2216 return TRUE;
2217
2218 return FALSE;
2219}
2220
2221/* See whether instructions I1 and I2 conflict, assuming I1 comes
2222 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2223 This should return TRUE if there is a conflict, or FALSE if the
2224 instructions can be swapped safely. */
2225
2226static bfd_boolean
2227sh_insns_conflict (i1, op1, i2, op2)
2228 unsigned int i1;
2229 const struct sh_opcode *op1;
2230 unsigned int i2;
2231 const struct sh_opcode *op2;
2232{
2233 unsigned int f1, f2;
2234
2235 f1 = op1->flags;
2236 f2 = op2->flags;
2237
2238 /* Load of fpscr conflicts with floating point operations.
2239 FIXME: shouldn't test raw opcodes here. */
2240 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2241 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2242 return TRUE;
2243
2244 if ((f1 & (BRANCH | DELAY)) != 0
2245 || (f2 & (BRANCH | DELAY)) != 0)
2246 return TRUE;
2247
2248 if (((f1 | f2) & SETSSP)
2249 && (f1 & (SETSSP | USESSP))
2250 && (f2 & (SETSSP | USESSP)))
2251 return TRUE;
2252
2253 if ((f1 & SETS1) != 0
2254 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2255 return TRUE;
2256 if ((f1 & SETS2) != 0
2257 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2258 return TRUE;
2259 if ((f1 & SETSR0) != 0
2260 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2261 return TRUE;
2262 if ((f1 & SETSAS)
2263 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2264 return TRUE;
2265 if ((f1 & SETSF1) != 0
2266 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2267 return TRUE;
2268
2269 if ((f2 & SETS1) != 0
2270 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2271 return TRUE;
2272 if ((f2 & SETS2) != 0
2273 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2274 return TRUE;
2275 if ((f2 & SETSR0) != 0
2276 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2277 return TRUE;
2278 if ((f2 & SETSAS)
2279 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2280 return TRUE;
2281 if ((f2 & SETSF1) != 0
2282 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2283 return TRUE;
2284
2285 /* The instructions do not conflict. */
2286 return FALSE;
2287}
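
/* Editor's note, an example of the check above: "add r1,r2" (0x321c)
   followed by "mov.l @r2,r3" (0x6322) conflicts, because the first
   insn sets r2 and the second reads it (following the usual rm/rn
   field layout), so sh_insns_conflict returns TRUE and the caller
   will not swap the pair.  */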
2288
2289/* I1 is a load instruction, and I2 is some other instruction. Return
2290 TRUE if I1 loads a register which I2 uses. */
2291
2292static bfd_boolean
2293sh_load_use (i1, op1, i2, op2)
2294 unsigned int i1;
2295 const struct sh_opcode *op1;
2296 unsigned int i2;
2297 const struct sh_opcode *op2;
2298{
2299 unsigned int f1;
2300
2301 f1 = op1->flags;
2302
2303 if ((f1 & LOAD) == 0)
2304 return FALSE;
2305
2306 /* If both SETS1 and SETSSP are set, that means a load to a special
2307 register using postincrement addressing mode, which we don't care
2308 about here. */
2309 if ((f1 & SETS1) != 0
2310 && (f1 & SETSSP) == 0
2311 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2312 return TRUE;
2313
2314 if ((f1 & SETSR0) != 0
2315 && sh_insn_uses_reg (i2, op2, 0))
2316 return TRUE;
2317
2318 if ((f1 & SETSF1) != 0
2319 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2320 return TRUE;
2321
2322 return FALSE;
2323}
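
/* Editor's note, for example: "mov.l @r1,r2" (0x6212) followed by an
   insn that reads r2 is a load-use pair; sh_load_use returns TRUE for
   it, and the alignment code below declines to make a swap that would
   create such a sequence, since it would stall the pipeline.  */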
2324
2325/* Try to align loads and stores within a span of memory. This is
2326 called by both the ELF and the COFF sh targets. ABFD and SEC are
2327 the BFD and section we are examining. CONTENTS is the contents of
2328 the section. SWAP is the routine to call to swap two instructions.
2329 RELOCS is a pointer to the internal relocation information, to be
2330 passed to SWAP. PLABEL is a pointer to the current label in a
2331 sorted list of labels; LABEL_END is the end of the list. START and
2332 STOP are the range of memory to examine. If a swap is made,
2333 *PSWAPPED is set to TRUE. */
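
/* Editor's note: concretely, the loop below visits each address that
   is congruent to 2 modulo 4 and, when the insn there is a load or
   store and the exchange is safe, swaps it with a neighbouring
   non-load/store insn so that the load or store ends up on a four
   byte boundary.  */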
2334
2335#ifdef COFF_WITH_PE
2336static
2337#endif
2338bfd_boolean
2339_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2340 plabel, label_end, start, stop, pswapped)
2341 bfd *abfd;
2342 asection *sec;
2343 bfd_byte *contents;
2344 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2345 PTR relocs;
2346 bfd_vma **plabel;
2347 bfd_vma *label_end;
2348 bfd_vma start;
2349 bfd_vma stop;
2350 bfd_boolean *pswapped;
2351{
2352 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2353 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2354 bfd_vma i;
2355
2356 /* The SH4 has a Harvard architecture, hence aligning loads is not
2357 desirable. In fact, it is counter-productive, since it interferes
2358 with the schedules generated by the compiler. */
2359 if (abfd->arch_info->mach == bfd_mach_sh4)
2360 return TRUE;
2361
2362 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2363 instructions. */
2364 if (dsp)
2365 {
2366 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2367      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2368 }
2369
2370 /* Instructions should be aligned on 2 byte boundaries. */
2371 if ((start & 1) == 1)
2372 ++start;
2373
2374 /* Now look through the unaligned addresses. */
2375 i = start;
2376 if ((i & 2) == 0)
2377 i += 2;
2378 for (; i < stop; i += 4)
2379 {
2380 unsigned int insn;
2381 const struct sh_opcode *op;
2382 unsigned int prev_insn = 0;
2383 const struct sh_opcode *prev_op = NULL;
2384
2385 insn = bfd_get_16 (abfd, contents + i);
2386 op = sh_insn_info (insn);
2387 if (op == NULL
2388 || (op->flags & (LOAD | STORE)) == 0)
2389 continue;
2390
2391 /* This is a load or store which is not on a four byte boundary. */
2392
2393 while (*plabel < label_end && **plabel < i)
2394 ++*plabel;
2395
2396 if (i > start)
2397 {
2398 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2399 /* If INSN is the field b of a parallel processing insn, it is not
2400 a load / store after all. Note that the test here might mistake
2401 the field_b of a pcopy insn for the starting code of a parallel
2402 processing insn; this might miss a swapping opportunity, but at
2403 least we're on the safe side. */
2404 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2405 continue;
2406
2407 /* Check if prev_insn is actually the field b of a parallel
2408 processing insn. Again, this can give a spurious match
2409 after a pcopy. */
2410 if (dsp && i - 2 > start)
2411 {
2412 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2413
2414 if ((pprev_insn & 0xfc00) == 0xf800)
2415 prev_op = NULL;
2416 else
2417 prev_op = sh_insn_info (prev_insn);
2418 }
2419 else
2420 prev_op = sh_insn_info (prev_insn);
2421
2422 /* If the load/store instruction is in a delay slot, we
2423 can't swap. */
2424 if (prev_op == NULL
2425 || (prev_op->flags & DELAY) != 0)
2426 continue;
2427 }
2428 if (i > start
2429 && (*plabel >= label_end || **plabel != i)
2430 && prev_op != NULL
2431 && (prev_op->flags & (LOAD | STORE)) == 0
2432 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2433 {
2434 bfd_boolean ok;
2435
2436 /* The load/store instruction does not have a label, and
2437 there is a previous instruction; PREV_INSN is not
2438 itself a load/store instruction, and PREV_INSN and
2439 INSN do not conflict. */
2440
2441 ok = TRUE;
2442
2443 if (i >= start + 4)
2444 {
2445 unsigned int prev2_insn;
2446 const struct sh_opcode *prev2_op;
2447
2448 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2449 prev2_op = sh_insn_info (prev2_insn);
2450
2451 /* If the instruction before PREV_INSN has a delay
2452 slot--that is, PREV_INSN is in a delay slot--we
2453 can not swap. */
2454 if (prev2_op == NULL
2455 || (prev2_op->flags & DELAY) != 0)
2456 ok = FALSE;
2457
2458 /* If the instruction before PREV_INSN is a load,
2459 and it sets a register which INSN uses, then
2460 putting INSN immediately after PREV_INSN will
2461 cause a pipeline bubble, so there is no point to
2462 making the swap. */
2463 if (ok
2464 && (prev2_op->flags & LOAD) != 0
2465 && sh_load_use (prev2_insn, prev2_op, insn, op))
2466 ok = FALSE;
2467 }
2468
2469 if (ok)
2470 {
2471 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2472 return FALSE;
2473 *pswapped = TRUE;
2474 continue;
2475 }
2476 }
2477
2478 while (*plabel < label_end && **plabel < i + 2)
2479 ++*plabel;
2480
2481 if (i + 2 < stop
2482 && (*plabel >= label_end || **plabel != i + 2))
2483 {
2484 unsigned int next_insn;
2485 const struct sh_opcode *next_op;
2486
2487 /* There is an instruction after the load/store
2488 instruction, and it does not have a label. */
2489 next_insn = bfd_get_16 (abfd, contents + i + 2);
2490 next_op = sh_insn_info (next_insn);
2491 if (next_op != NULL
2492 && (next_op->flags & (LOAD | STORE)) == 0
2493 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2494 {
2495 bfd_boolean ok;
2496
2497 /* NEXT_INSN is not itself a load/store instruction,
2498 and it does not conflict with INSN. */
2499
2500 ok = TRUE;
2501
2502 /* If PREV_INSN is a load, and it sets a register
2503 which NEXT_INSN uses, then putting NEXT_INSN
2504 immediately after PREV_INSN will cause a pipeline
2505 bubble, so there is no reason to make this swap. */
2506 if (prev_op != NULL
2507 && (prev_op->flags & LOAD) != 0
2508 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2509 ok = FALSE;
2510
2511 /* If INSN is a load, and it sets a register which
2512 the insn after NEXT_INSN uses, then doing the
2513 swap will cause a pipeline bubble, so there is no
2514 reason to make the swap. However, if the insn
2515 after NEXT_INSN is itself a load or store
2516 instruction, then it is misaligned, so
2517 optimistically hope that it will be swapped
2518 itself, and just live with the pipeline bubble if
2519 it isn't. */
2520 if (ok
2521 && i + 4 < stop
2522 && (op->flags & LOAD) != 0)
2523 {
2524 unsigned int next2_insn;
2525 const struct sh_opcode *next2_op;
2526
2527 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2528 next2_op = sh_insn_info (next2_insn);
2529 if (next2_op == NULL
2530 || ((next2_op->flags & (LOAD | STORE)) == 0
2531 && sh_load_use (insn, op, next2_insn, next2_op)))
2532 ok = FALSE;
2533 }
2534
2535 if (ok)
2536 {
2537 if (! (*swap) (abfd, sec, relocs, contents, i))
2538 return FALSE;
2539 *pswapped = TRUE;
2540 continue;
2541 }
2542 }
2543 }
2544 }
2545
2546 return TRUE;
2547}
2548#endif /* not COFF_IMAGE_WITH_PE */
2549
2550/* Look for loads and stores which we can align to four byte
2551 boundaries. See the longer comment above sh_relax_section for why
2552 this is desirable. This sets *PSWAPPED if some instruction was
2553 swapped. */
2554
2555static bfd_boolean
2556sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2557 bfd *abfd;
2558 asection *sec;
2559 struct internal_reloc *internal_relocs;
2560 bfd_byte *contents;
2561 bfd_boolean *pswapped;
2562{
2563 struct internal_reloc *irel, *irelend;
2564 bfd_vma *labels = NULL;
2565 bfd_vma *label, *label_end;
2566 bfd_size_type amt;
2567
2568 *pswapped = FALSE;
2569
2570 irelend = internal_relocs + sec->reloc_count;
2571
2572 /* Get all the addresses with labels on them. */
2573 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2574 labels = (bfd_vma *) bfd_malloc (amt);
2575 if (labels == NULL)
2576 goto error_return;
2577 label_end = labels;
2578 for (irel = internal_relocs; irel < irelend; irel++)
2579 {
2580 if (irel->r_type == R_SH_LABEL)
2581 {
2582 *label_end = irel->r_vaddr - sec->vma;
2583 ++label_end;
2584 }
2585 }
2586
2587 /* Note that the assembler currently always outputs relocs in
2588 address order. If that ever changes, this code will need to sort
2589 the label values and the relocs. */
2590
2591 label = labels;
2592
2593 for (irel = internal_relocs; irel < irelend; irel++)
2594 {
2595 bfd_vma start, stop;
2596
2597 if (irel->r_type != R_SH_CODE)
2598 continue;
2599
2600 start = irel->r_vaddr - sec->vma;
2601
2602 for (irel++; irel < irelend; irel++)
2603 if (irel->r_type == R_SH_DATA)
2604 break;
2605 if (irel < irelend)
2606 stop = irel->r_vaddr - sec->vma;
2607 else
2608 stop = sec->size;
2609
2610 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2611 (PTR) internal_relocs, &label,
2612 label_end, start, stop, pswapped))
2613 goto error_return;
2614 }
2615
2616 free (labels);
2617
2618 return TRUE;
2619
2620 error_return:
2621 if (labels != NULL)
2622 free (labels);
2623 return FALSE;
2624}
2625
2626/* Swap two SH instructions. */
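/* Editor's note: besides exchanging the two 16 bit instruction words,
   the routine below moves any relocs that refer to either address and
   adjusts PC-relative displacements within the swapped pair by one
   instruction, so the relocated values stay correct.  */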
2627
2628static bfd_boolean
2629sh_swap_insns (abfd, sec, relocs, contents, addr)
2630 bfd *abfd;
2631 asection *sec;
2632 PTR relocs;
2633 bfd_byte *contents;
2634 bfd_vma addr;
2635{
2636 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2637 unsigned short i1, i2;
2638 struct internal_reloc *irel, *irelend;
2639
2640 /* Swap the instructions themselves. */
2641 i1 = bfd_get_16 (abfd, contents + addr);
2642 i2 = bfd_get_16 (abfd, contents + addr + 2);
2643 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2644 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2645
2646 /* Adjust all reloc addresses. */
2647 irelend = internal_relocs + sec->reloc_count;
2648 for (irel = internal_relocs; irel < irelend; irel++)
2649 {
2650 int type, add;
2651
2652 /* There are a few special types of relocs that we don't want to
2653 adjust. These relocs do not apply to the instruction itself,
2654 but are only associated with the address. */
2655 type = irel->r_type;
2656 if (type == R_SH_ALIGN
2657 || type == R_SH_CODE
2658 || type == R_SH_DATA
2659 || type == R_SH_LABEL)
2660 continue;
2661
2662 /* If an R_SH_USES reloc points to one of the addresses being
2663 swapped, we must adjust it. It would be incorrect to do this
2664 for a jump, though, since we want to execute both
2665 instructions after the jump. (We have avoided swapping
2666 around a label, so the jump will not wind up executing an
2667 instruction it shouldn't). */
2668 if (type == R_SH_USES)
2669 {
2670 bfd_vma off;
2671
2672 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2673 if (off == addr)
2674 irel->r_offset += 2;
2675 else if (off == addr + 2)
2676 irel->r_offset -= 2;
2677 }
2678
2679 if (irel->r_vaddr - sec->vma == addr)
2680 {
2681 irel->r_vaddr += 2;
2682 add = -2;
2683 }
2684 else if (irel->r_vaddr - sec->vma == addr + 2)
2685 {
2686 irel->r_vaddr -= 2;
2687 add = 2;
2688 }
2689 else
2690 add = 0;
2691
2692 if (add != 0)
2693 {
2694 bfd_byte *loc;
2695 unsigned short insn, oinsn;
2696 bfd_boolean overflow;
2697
2698 loc = contents + irel->r_vaddr - sec->vma;
2699 overflow = FALSE;
2700 switch (type)
2701 {
2702 default:
2703 break;
2704
2705 case R_SH_PCDISP8BY2:
2706 case R_SH_PCRELIMM8BY2:
2707 insn = bfd_get_16 (abfd, loc);
2708 oinsn = insn;
2709 insn += add / 2;
2710 if ((oinsn & 0xff00) != (insn & 0xff00))
2711 overflow = TRUE;
2712 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2713 break;
2714
2715 case R_SH_PCDISP:
2716 insn = bfd_get_16 (abfd, loc);
2717 oinsn = insn;
2718 insn += add / 2;
2719 if ((oinsn & 0xf000) != (insn & 0xf000))
2720 overflow = TRUE;
2721 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2722 break;
2723
2724 case R_SH_PCRELIMM8BY4:
2725	      /* This reloc ignores the least significant two bits of
2726	         the program counter before adding in the offset.
2727	         This means that if ADDR is on a four byte boundary,
2728	         the swap does not affect the offset.  If ADDR is not
2729	         on a four byte boundary, the instruction pair crosses
2730	         a four byte boundary, and the offset must be adjusted.  */
2731 if ((addr & 3) != 0)
2732 {
2733 insn = bfd_get_16 (abfd, loc);
2734 oinsn = insn;
2735 insn += add / 2;
2736 if ((oinsn & 0xff00) != (insn & 0xff00))
2737 overflow = TRUE;
2738 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2739 }
2740
2741 break;
2742 }
2743
2744 if (overflow)
2745 {
2746 ((*_bfd_error_handler)
2747 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2748 abfd, (unsigned long) irel->r_vaddr));
2749 bfd_set_error (bfd_error_bad_value);
2750 return FALSE;
2751 }
2752 }
2753 }
2754
2755 return TRUE;
2756}
2757\f
2758/* This is a modification of _bfd_coff_generic_relocate_section, which
2759 will handle SH relaxing. */
2760
2761static bfd_boolean
2762sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2763 relocs, syms, sections)
2764 bfd *output_bfd ATTRIBUTE_UNUSED;
2765 struct bfd_link_info *info;
2766 bfd *input_bfd;
2767 asection *input_section;
2768 bfd_byte *contents;
2769 struct internal_reloc *relocs;
2770 struct internal_syment *syms;
2771 asection **sections;
2772{
2773 struct internal_reloc *rel;
2774 struct internal_reloc *relend;
2775
2776 rel = relocs;
2777 relend = rel + input_section->reloc_count;
2778 for (; rel < relend; rel++)
2779 {
2780 long symndx;
2781 struct coff_link_hash_entry *h;
2782 struct internal_syment *sym;
2783 bfd_vma addend;
2784 bfd_vma val;
2785 reloc_howto_type *howto;
2786 bfd_reloc_status_type rstat;
2787
2788 /* Almost all relocs have to do with relaxing. If any work must
2789 be done for them, it has been done in sh_relax_section. */
2790 if (rel->r_type != R_SH_IMM32
2791#ifdef COFF_WITH_PE
2792 && rel->r_type != R_SH_IMM32CE
2793 && rel->r_type != R_SH_IMAGEBASE
2794#endif
2795 && rel->r_type != R_SH_PCDISP)
2796 continue;
2797
2798 symndx = rel->r_symndx;
2799
2800 if (symndx == -1)
2801 {
2802 h = NULL;
2803 sym = NULL;
2804 }
2805 else
2806 {
2807 if (symndx < 0
2808 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2809 {
2810 (*_bfd_error_handler)
2811 ("%B: illegal symbol index %ld in relocs",
2812 input_bfd, symndx);
2813 bfd_set_error (bfd_error_bad_value);
2814 return FALSE;
2815 }
2816 h = obj_coff_sym_hashes (input_bfd)[symndx];
2817 sym = syms + symndx;
2818 }
2819
2820 if (sym != NULL && sym->n_scnum != 0)
2821 addend = - sym->n_value;
2822 else
2823 addend = 0;
2824
2825 if (rel->r_type == R_SH_PCDISP)
2826 addend -= 4;
2827
2828 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2829 howto = NULL;
2830 else
2831 howto = &sh_coff_howtos[rel->r_type];
2832
2833 if (howto == NULL)
2834 {
2835 bfd_set_error (bfd_error_bad_value);
2836 return FALSE;
2837 }
2838
2839#ifdef COFF_WITH_PE
2840 if (rel->r_type == R_SH_IMAGEBASE)
2841 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2842#endif
2843
2844 val = 0;
2845
2846 if (h == NULL)
2847 {
2848 asection *sec;
2849
2850 /* There is nothing to do for an internal PCDISP reloc. */
2851 if (rel->r_type == R_SH_PCDISP)
2852 continue;
2853
2854 if (symndx == -1)
2855 {
2856 sec = bfd_abs_section_ptr;
2857 val = 0;
2858 }
2859 else
2860 {
2861 sec = sections[symndx];
2862 val = (sec->output_section->vma
2863 + sec->output_offset
2864 + sym->n_value
2865 - sec->vma);
2866 }
2867 }
2868 else
2869 {
2870 if (h->root.type == bfd_link_hash_defined
2871 || h->root.type == bfd_link_hash_defweak)
2872 {
2873 asection *sec;
2874
2875 sec = h->root.u.def.section;
2876 val = (h->root.u.def.value
2877 + sec->output_section->vma
2878 + sec->output_offset);
2879 }
2880 else if (! info->relocatable)
2881 {
2882 if (! ((*info->callbacks->undefined_symbol)
2883 (info, h->root.root.string, input_bfd, input_section,
2884 rel->r_vaddr - input_section->vma, TRUE)))
2885 return FALSE;
2886 }
2887 }
2888
2889 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2890 contents,
2891 rel->r_vaddr - input_section->vma,
2892 val, addend);
2893
2894 switch (rstat)
2895 {
2896 default:
2897 abort ();
2898 case bfd_reloc_ok:
2899 break;
2900 case bfd_reloc_overflow:
2901 {
2902 const char *name;
2903 char buf[SYMNMLEN + 1];
2904
2905 if (symndx == -1)
2906 name = "*ABS*";
2907 else if (h != NULL)
2908 name = NULL;
2909 else if (sym->_n._n_n._n_zeroes == 0
2910 && sym->_n._n_n._n_offset != 0)
2911 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2912 else
2913 {
2914 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2915 buf[SYMNMLEN] = '\0';
2916 name = buf;
2917 }
2918
2919 if (! ((*info->callbacks->reloc_overflow)
2920 (info, (h ? &h->root : NULL), name, howto->name,
2921 (bfd_vma) 0, input_bfd, input_section,
2922 rel->r_vaddr - input_section->vma)))
2923 return FALSE;
2924 }
2925 }
2926 }
2927
2928 return TRUE;
2929}
2930
2931/* This is a version of bfd_generic_get_relocated_section_contents
2932 which uses sh_relocate_section. */
2933
2934static bfd_byte *
2935sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2936 data, relocatable, symbols)
2937 bfd *output_bfd;
2938 struct bfd_link_info *link_info;
2939 struct bfd_link_order *link_order;
2940 bfd_byte *data;
2941 bfd_boolean relocatable;
2942 asymbol **symbols;
2943{
2944 asection *input_section = link_order->u.indirect.section;
2945 bfd *input_bfd = input_section->owner;
2946 asection **sections = NULL;
2947 struct internal_reloc *internal_relocs = NULL;
2948 struct internal_syment *internal_syms = NULL;
2949
2950 /* We only need to handle the case of relaxing, or of having a
2951 particular set of section contents, specially. */
2952 if (relocatable
2953 || coff_section_data (input_bfd, input_section) == NULL
2954 || coff_section_data (input_bfd, input_section)->contents == NULL)
2955 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2956 link_order, data,
2957 relocatable,
2958 symbols);
2959
2960 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2961 (size_t) input_section->size);
2962
2963 if ((input_section->flags & SEC_RELOC) != 0
2964 && input_section->reloc_count > 0)
2965 {
2966 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2967 bfd_byte *esym, *esymend;
2968 struct internal_syment *isymp;
2969 asection **secpp;
2970 bfd_size_type amt;
2971
2972 if (! _bfd_coff_get_external_symbols (input_bfd))
2973 goto error_return;
2974
2975 internal_relocs = (_bfd_coff_read_internal_relocs
2976 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2977 FALSE, (struct internal_reloc *) NULL));
2978 if (internal_relocs == NULL)
2979 goto error_return;
2980
2981 amt = obj_raw_syment_count (input_bfd);
2982 amt *= sizeof (struct internal_syment);
2983 internal_syms = (struct internal_syment *) bfd_malloc (amt);
2984 if (internal_syms == NULL)
2985 goto error_return;
2986
2987 amt = obj_raw_syment_count (input_bfd);
2988 amt *= sizeof (asection *);
2989 sections = (asection **) bfd_malloc (amt);
2990 if (sections == NULL)
2991 goto error_return;
2992
2993 isymp = internal_syms;
2994 secpp = sections;
2995 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2996 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2997 while (esym < esymend)
2998 {
2999 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3000
3001 if (isymp->n_scnum != 0)
3002 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3003 else
3004 {
3005 if (isymp->n_value == 0)
3006 *secpp = bfd_und_section_ptr;
3007 else
3008 *secpp = bfd_com_section_ptr;
3009 }
3010
3011 esym += (isymp->n_numaux + 1) * symesz;
3012 secpp += isymp->n_numaux + 1;
3013 isymp += isymp->n_numaux + 1;
3014 }
3015
3016 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3017 input_section, data, internal_relocs,
3018 internal_syms, sections))
3019 goto error_return;
3020
3021 free (sections);
3022 sections = NULL;
3023 free (internal_syms);
3024 internal_syms = NULL;
3025 free (internal_relocs);
3026 internal_relocs = NULL;
3027 }
3028
3029 return data;
3030
3031 error_return:
3032 if (internal_relocs != NULL)
3033 free (internal_relocs);
3034 if (internal_syms != NULL)
3035 free (internal_syms);
3036 if (sections != NULL)
3037 free (sections);
3038 return NULL;
3039}
3040
3041/* The target vectors. */
3042
3043#ifndef TARGET_SHL_SYM
3044CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3045#endif
3046
3047#ifdef TARGET_SHL_SYM
3048#define TARGET_SYM TARGET_SHL_SYM
3049#else
3050#define TARGET_SYM shlcoff_vec
3051#endif
3052
3053#ifndef TARGET_SHL_NAME
3054#define TARGET_SHL_NAME "coff-shl"
3055#endif
3056
3057#ifdef COFF_WITH_PE
3058CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3059 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3060#else
3061CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3062 0, '_', NULL, COFF_SWAP_TABLE)
3063#endif
3064
3065#ifndef TARGET_SHL_SYM
3066static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3067static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3068/* Some people want versions of the SH COFF target which do not align
3069 to 16 byte boundaries. We implement that by adding a couple of new
3070 target vectors. These are just like the ones above, but they
3071 change the default section alignment. To generate them in the
3072 assembler, use -small. To use them in the linker, use -b
3073 coff-sh{l}-small and -oformat coff-sh{l}-small.
3074
3075 Yes, this is a horrible hack. A general solution for setting
3076 section alignment in COFF is rather complex. ELF handles this
3077 correctly. */
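
/* Editor's note, a hedged usage sketch: with a toolchain installed
   under an assumed sh-coff- prefix (the prefix is an assumption, not
   something this file defines), the small variants would be selected
   roughly like this:

       sh-coff-as -small input.s -o input.o
       sh-coff-ld -b coff-sh-small -oformat coff-sh-small input.o

   using only the -small, -b and -oformat options mentioned above.  */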
3078
3079/* Only recognize the small versions if the target was not defaulted.
3080 Otherwise we won't recognize the non default endianness. */
3081
3082static const bfd_target *
3083coff_small_object_p (abfd)
3084 bfd *abfd;
3085{
3086 if (abfd->target_defaulted)
3087 {
3088 bfd_set_error (bfd_error_wrong_format);
3089 return NULL;
3090 }
3091 return coff_object_p (abfd);
3092}
3093
3094/* Set the section alignment for the small versions. */
3095
3096static bfd_boolean
3097coff_small_new_section_hook (abfd, section)
3098 bfd *abfd;
3099 asection *section;
3100{
3101 if (! coff_new_section_hook (abfd, section))
3102 return FALSE;
3103
3104 /* We must align to at least a four byte boundary, because longword
3105 accesses must be on a four byte boundary. */
3106 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3107 section->alignment_power = 2;
3108
3109 return TRUE;
3110}
3111
3112/* This is copied from bfd_coff_std_swap_table so that we can change
3113 the default section alignment power. */
3114
3115static const bfd_coff_backend_data bfd_coff_small_swap_table =
3116{
3117 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3118 coff_swap_aux_out, coff_swap_sym_out,
3119 coff_swap_lineno_out, coff_swap_reloc_out,
3120 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3121 coff_swap_scnhdr_out,
3122 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3123#ifdef COFF_LONG_FILENAMES
3124 TRUE,
3125#else
3126 FALSE,
3127#endif
3128#ifdef COFF_LONG_SECTION_NAMES
3129 TRUE,
3130#else
3131 FALSE,
3132#endif
3133 2,
3134#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3135 TRUE,
3136#else
3137 FALSE,
3138#endif
3139#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3140 4,
3141#else
3142 2,
3143#endif
3144 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3145 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3146 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3147 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3148 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3149 coff_classify_symbol, coff_compute_section_file_positions,
3150 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3151 coff_adjust_symndx, coff_link_add_one_symbol,
3152 coff_link_output_has_begun, coff_final_link_postscript
3153};
3154
3155#define coff_small_close_and_cleanup \
3156 coff_close_and_cleanup
3157#define coff_small_bfd_free_cached_info \
3158 coff_bfd_free_cached_info
3159#define coff_small_get_section_contents \
3160 coff_get_section_contents
3161#define coff_small_get_section_contents_in_window \
3162 coff_get_section_contents_in_window
3163
3164extern const bfd_target shlcoff_small_vec;
3165
3166const bfd_target shcoff_small_vec =
3167{
3168 "coff-sh-small", /* name */
3169 bfd_target_coff_flavour,
3170 BFD_ENDIAN_BIG, /* data byte order is big */
3171 BFD_ENDIAN_BIG, /* header byte order is big */
3172
3173 (HAS_RELOC | EXEC_P | /* object flags */
3174 HAS_LINENO | HAS_DEBUG |
3175 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3176
3177 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3178 '_', /* leading symbol underscore */
3179 '/', /* ar_pad_char */
3180 15, /* ar_max_namelen */
3181 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3182 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3183 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3184 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3185 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3186 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3187
3188 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3189 bfd_generic_archive_p, _bfd_dummy_target},
3190 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3191 bfd_false},
3192 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3193 _bfd_write_archive_contents, bfd_false},
3194
3195 BFD_JUMP_TABLE_GENERIC (coff_small),
3196 BFD_JUMP_TABLE_COPY (coff),
3197 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3198 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3199 BFD_JUMP_TABLE_SYMBOLS (coff),
3200 BFD_JUMP_TABLE_RELOCS (coff),
3201 BFD_JUMP_TABLE_WRITE (coff),
3202 BFD_JUMP_TABLE_LINK (coff),
3203 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3204
3205 & shlcoff_small_vec,
3206
3207 (PTR) &bfd_coff_small_swap_table
3208};
3209
3210const bfd_target shlcoff_small_vec =
3211{
3212 "coff-shl-small", /* name */
3213 bfd_target_coff_flavour,
3214 BFD_ENDIAN_LITTLE, /* data byte order is little */
3215  BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3216
3217 (HAS_RELOC | EXEC_P | /* object flags */
3218 HAS_LINENO | HAS_DEBUG |
3219 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3220
3221 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3222 '_', /* leading symbol underscore */
3223 '/', /* ar_pad_char */
3224 15, /* ar_max_namelen */
3225 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3226 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3227 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3228 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3229 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3230 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3231
3232 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3233 bfd_generic_archive_p, _bfd_dummy_target},
3234 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3235 bfd_false},
3236 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3237 _bfd_write_archive_contents, bfd_false},
3238
3239 BFD_JUMP_TABLE_GENERIC (coff_small),
3240 BFD_JUMP_TABLE_COPY (coff),
3241 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3242 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3243 BFD_JUMP_TABLE_SYMBOLS (coff),
3244 BFD_JUMP_TABLE_RELOCS (coff),
3245 BFD_JUMP_TABLE_WRITE (coff),
3246 BFD_JUMP_TABLE_LINK (coff),
3247 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3248
3249 & shcoff_small_vec,
3250
3251 (PTR) &bfd_coff_small_swap_table
3252};
3253#endif