1 /* BFD back-end for Renesas Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011
4 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
6 Written by Steve Chamberlain, <sac@cygnus.com>.
7 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
8
9 This file is part of BFD, the Binary File Descriptor library.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 MA 02110-1301, USA. */
25
26 #include "sysdep.h"
27 #include "bfd.h"
28 #include "libiberty.h"
29 #include "libbfd.h"
30 #include "bfdlink.h"
31 #include "coff/sh.h"
32 #include "coff/internal.h"
33
34 #undef bfd_pe_print_pdata
35
36 #ifdef COFF_WITH_PE
37 #include "coff/pe.h"
38
39 #ifndef COFF_IMAGE_WITH_PE
40 static bfd_boolean sh_align_load_span
41 PARAMS ((bfd *, asection *, bfd_byte *,
42 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
43 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
44
45 #define _bfd_sh_align_load_span sh_align_load_span
46 #endif
47
48 #define bfd_pe_print_pdata _bfd_pe_print_ce_compressed_pdata
49
50 #else
51
52 #define bfd_pe_print_pdata NULL
53
54 #endif /* COFF_WITH_PE. */
55
56 #include "libcoff.h"
57
58 /* Internal functions. */
59 static bfd_reloc_status_type sh_reloc
60 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
61 static long get_symbol_value PARAMS ((asymbol *));
62 static bfd_boolean sh_relax_section
63 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
64 static bfd_boolean sh_relax_delete_bytes
65 PARAMS ((bfd *, asection *, bfd_vma, int));
66 #ifndef COFF_IMAGE_WITH_PE
67 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
68 #endif
69 static bfd_boolean sh_align_loads
70 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
71 bfd_boolean *));
72 static bfd_boolean sh_swap_insns
73 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
74 static bfd_boolean sh_relocate_section
75 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
76 struct internal_reloc *, struct internal_syment *, asection **));
77 static bfd_byte *sh_coff_get_relocated_section_contents
78 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
79 bfd_byte *, bfd_boolean, asymbol **));
80 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
81
82 #ifdef COFF_WITH_PE
83 /* Can't build import tables with 2**4 alignment. */
84 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
85 #else
86 /* Default section alignment to 2**4. */
87 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
88 #endif
89
90 #ifdef COFF_IMAGE_WITH_PE
91 /* Align PE executables. */
92 #define COFF_PAGE_SIZE 0x1000
93 #endif
94
95 /* Generate long file names. */
96 #define COFF_LONG_FILENAMES
97
98 #ifdef COFF_WITH_PE
99 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
100 /* Return TRUE if this relocation should
101 appear in the output .reloc section. */
102 static bfd_boolean in_reloc_p (abfd, howto)
103 bfd * abfd ATTRIBUTE_UNUSED;
104 reloc_howto_type * howto;
105 {
106 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
107 }
108 #endif
109
110 /* The supported relocations. There are a lot of relocations defined
111 in coff/internal.h which we do not expect to ever see. */
112 static reloc_howto_type sh_coff_howtos[] =
113 {
114 EMPTY_HOWTO (0),
115 EMPTY_HOWTO (1),
116 #ifdef COFF_WITH_PE
117 /* Windows CE */
118 HOWTO (R_SH_IMM32CE, /* type */
119 0, /* rightshift */
120 2, /* size (0 = byte, 1 = short, 2 = long) */
121 32, /* bitsize */
122 FALSE, /* pc_relative */
123 0, /* bitpos */
124 complain_overflow_bitfield, /* complain_on_overflow */
125 sh_reloc, /* special_function */
126 "r_imm32ce", /* name */
127 TRUE, /* partial_inplace */
128 0xffffffff, /* src_mask */
129 0xffffffff, /* dst_mask */
130 FALSE), /* pcrel_offset */
131 #else
132 EMPTY_HOWTO (2),
133 #endif
134 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
135 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
136 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
137 EMPTY_HOWTO (6), /* R_SH_IMM24 */
138 EMPTY_HOWTO (7), /* R_SH_LOW16 */
139 EMPTY_HOWTO (8),
140 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
141
142 HOWTO (R_SH_PCDISP8BY2, /* type */
143 1, /* rightshift */
144 1, /* size (0 = byte, 1 = short, 2 = long) */
145 8, /* bitsize */
146 TRUE, /* pc_relative */
147 0, /* bitpos */
148 complain_overflow_signed, /* complain_on_overflow */
149 sh_reloc, /* special_function */
150 "r_pcdisp8by2", /* name */
151 TRUE, /* partial_inplace */
152 0xff, /* src_mask */
153 0xff, /* dst_mask */
154 TRUE), /* pcrel_offset */
155
156 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
157
158 HOWTO (R_SH_PCDISP, /* type */
159 1, /* rightshift */
160 1, /* size (0 = byte, 1 = short, 2 = long) */
161 12, /* bitsize */
162 TRUE, /* pc_relative */
163 0, /* bitpos */
164 complain_overflow_signed, /* complain_on_overflow */
165 sh_reloc, /* special_function */
166 "r_pcdisp12by2", /* name */
167 TRUE, /* partial_inplace */
168 0xfff, /* src_mask */
169 0xfff, /* dst_mask */
170 TRUE), /* pcrel_offset */
171
172 EMPTY_HOWTO (13),
173
174 HOWTO (R_SH_IMM32, /* type */
175 0, /* rightshift */
176 2, /* size (0 = byte, 1 = short, 2 = long) */
177 32, /* bitsize */
178 FALSE, /* pc_relative */
179 0, /* bitpos */
180 complain_overflow_bitfield, /* complain_on_overflow */
181 sh_reloc, /* special_function */
182 "r_imm32", /* name */
183 TRUE, /* partial_inplace */
184 0xffffffff, /* src_mask */
185 0xffffffff, /* dst_mask */
186 FALSE), /* pcrel_offset */
187
188 EMPTY_HOWTO (15),
189 #ifdef COFF_WITH_PE
190 HOWTO (R_SH_IMAGEBASE, /* type */
191 0, /* rightshift */
192 2, /* size (0 = byte, 1 = short, 2 = long) */
193 32, /* bitsize */
194 FALSE, /* pc_relative */
195 0, /* bitpos */
196 complain_overflow_bitfield, /* complain_on_overflow */
197 sh_reloc, /* special_function */
198 "rva32", /* name */
199 TRUE, /* partial_inplace */
200 0xffffffff, /* src_mask */
201 0xffffffff, /* dst_mask */
202 FALSE), /* pcrel_offset */
203 #else
204 EMPTY_HOWTO (16), /* R_SH_IMM8 */
205 #endif
206 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
207 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
208 EMPTY_HOWTO (19), /* R_SH_IMM4 */
209 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
210 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
211
212 HOWTO (R_SH_PCRELIMM8BY2, /* type */
213 1, /* rightshift */
214 1, /* size (0 = byte, 1 = short, 2 = long) */
215 8, /* bitsize */
216 TRUE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_unsigned, /* complain_on_overflow */
219 sh_reloc, /* special_function */
220 "r_pcrelimm8by2", /* name */
221 TRUE, /* partial_inplace */
222 0xff, /* src_mask */
223 0xff, /* dst_mask */
224 TRUE), /* pcrel_offset */
225
226 HOWTO (R_SH_PCRELIMM8BY4, /* type */
227 2, /* rightshift */
228 1, /* size (0 = byte, 1 = short, 2 = long) */
229 8, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_unsigned, /* complain_on_overflow */
233 sh_reloc, /* special_function */
234 "r_pcrelimm8by4", /* name */
235 TRUE, /* partial_inplace */
236 0xff, /* src_mask */
237 0xff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_SH_IMM16, /* type */
241 0, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 16, /* bitsize */
244 FALSE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_bitfield, /* complain_on_overflow */
247 sh_reloc, /* special_function */
248 "r_imm16", /* name */
249 TRUE, /* partial_inplace */
250 0xffff, /* src_mask */
251 0xffff, /* dst_mask */
252 FALSE), /* pcrel_offset */
253
254 HOWTO (R_SH_SWITCH16, /* type */
255 0, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 16, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_bitfield, /* complain_on_overflow */
261 sh_reloc, /* special_function */
262 "r_switch16", /* name */
263 TRUE, /* partial_inplace */
264 0xffff, /* src_mask */
265 0xffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_SH_SWITCH32, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield, /* complain_on_overflow */
275 sh_reloc, /* special_function */
276 "r_switch32", /* name */
277 TRUE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_SH_USES, /* type */
283 0, /* rightshift */
284 1, /* size (0 = byte, 1 = short, 2 = long) */
285 16, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_bitfield, /* complain_on_overflow */
289 sh_reloc, /* special_function */
290 "r_uses", /* name */
291 TRUE, /* partial_inplace */
292 0xffff, /* src_mask */
293 0xffff, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 HOWTO (R_SH_COUNT, /* type */
297 0, /* rightshift */
298 2, /* size (0 = byte, 1 = short, 2 = long) */
299 32, /* bitsize */
300 FALSE, /* pc_relative */
301 0, /* bitpos */
302 complain_overflow_bitfield, /* complain_on_overflow */
303 sh_reloc, /* special_function */
304 "r_count", /* name */
305 TRUE, /* partial_inplace */
306 0xffffffff, /* src_mask */
307 0xffffffff, /* dst_mask */
308 FALSE), /* pcrel_offset */
309
310 HOWTO (R_SH_ALIGN, /* type */
311 0, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 32, /* bitsize */
314 FALSE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_bitfield, /* complain_on_overflow */
317 sh_reloc, /* special_function */
318 "r_align", /* name */
319 TRUE, /* partial_inplace */
320 0xffffffff, /* src_mask */
321 0xffffffff, /* dst_mask */
322 FALSE), /* pcrel_offset */
323
324 HOWTO (R_SH_CODE, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield, /* complain_on_overflow */
331 sh_reloc, /* special_function */
332 "r_code", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_SH_DATA, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield, /* complain_on_overflow */
345 sh_reloc, /* special_function */
346 "r_data", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_SH_LABEL, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield, /* complain_on_overflow */
359 sh_reloc, /* special_function */
360 "r_label", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 HOWTO (R_SH_SWITCH8, /* type */
367 0, /* rightshift */
368 0, /* size (0 = byte, 1 = short, 2 = long) */
369 8, /* bitsize */
370 FALSE, /* pc_relative */
371 0, /* bitpos */
372 complain_overflow_bitfield, /* complain_on_overflow */
373 sh_reloc, /* special_function */
374 "r_switch8", /* name */
375 TRUE, /* partial_inplace */
376 0xff, /* src_mask */
377 0xff, /* dst_mask */
378 FALSE) /* pcrel_offset */
379 };
380
381 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
382
383 /* Check for a bad magic number. */
384 #define BADMAG(x) SHBADMAG(x)
385
386 /* Customize coffcode.h (this is not currently used). */
387 #define SH 1
388
389 /* FIXME: This should not be set here. */
390 #define __A_MAGIC_SET__
391
392 #ifndef COFF_WITH_PE
393 /* Swap the r_offset field in and out. */
394 #define SWAP_IN_RELOC_OFFSET H_GET_32
395 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
396
397 /* Swap out extra information in the reloc structure. */
398 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
399 do \
400 { \
401 dst->r_stuff[0] = 'S'; \
402 dst->r_stuff[1] = 'C'; \
403 } \
404 while (0)
405 #endif
406
407 /* Get the value of a symbol, when performing a relocation. */
408
409 static long
410 get_symbol_value (symbol)
411 asymbol *symbol;
412 {
413 bfd_vma relocation;
414
415 if (bfd_is_com_section (symbol->section))
416 relocation = 0;
417 else
418 relocation = (symbol->value +
419 symbol->section->output_section->vma +
420 symbol->section->output_offset);
421
422 return relocation;
423 }
424
425 #ifdef COFF_WITH_PE
426 /* Convert an rtype to howto for the COFF backend linker.
427 Copied from coff-i386. */
428 #define coff_rtype_to_howto coff_sh_rtype_to_howto
429 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
430
431 static reloc_howto_type *
432 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
433 bfd * abfd ATTRIBUTE_UNUSED;
434 asection * sec;
435 struct internal_reloc * rel;
436 struct coff_link_hash_entry * h;
437 struct internal_syment * sym;
438 bfd_vma * addendp;
439 {
440 reloc_howto_type * howto;
441
442 howto = sh_coff_howtos + rel->r_type;
443
444 *addendp = 0;
445
446 if (howto->pc_relative)
447 *addendp += sec->vma;
448
449 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
450 {
451 /* This is a common symbol. The section contents include the
452 size (sym->n_value) as an addend. The relocate_section
453 function will be adding in the final value of the symbol. We
454 need to subtract out the current size in order to get the
455 correct result. */
456 BFD_ASSERT (h != NULL);
457 }
458
459 if (howto->pc_relative)
460 {
461 *addendp -= 4;
462
463 /* If the symbol is defined, then the generic code is going to
464 add back the symbol value in order to cancel out an
465 adjustment it made to the addend. However, we set the addend
466 to 0 at the start of this function. We need to adjust here,
467 to avoid the adjustment the generic code will make. FIXME:
468 This is getting a bit hackish. */
469 if (sym != NULL && sym->n_scnum != 0)
470 *addendp -= sym->n_value;
471 }
472
473 if (rel->r_type == R_SH_IMAGEBASE)
474 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
475
476 return howto;
477 }
478
479 #endif /* COFF_WITH_PE */
480
481 /* This structure is used to map BFD reloc codes to SH relocs. */
482 struct shcoff_reloc_map
483 {
484 bfd_reloc_code_real_type bfd_reloc_val;
485 unsigned char shcoff_reloc_val;
486 };
487
488 #ifdef COFF_WITH_PE
489 /* An array mapping BFD reloc codes to SH PE relocs. */
490 static const struct shcoff_reloc_map sh_reloc_map[] =
491 {
492 { BFD_RELOC_32, R_SH_IMM32CE },
493 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
494 { BFD_RELOC_CTOR, R_SH_IMM32CE },
495 };
496 #else
497 /* An array mapping BFD reloc codes to SH COFF relocs. */
498 static const struct shcoff_reloc_map sh_reloc_map[] =
499 {
500 { BFD_RELOC_32, R_SH_IMM32 },
501 { BFD_RELOC_CTOR, R_SH_IMM32 },
502 };
503 #endif
504
505 /* Given a BFD reloc code, return the howto structure for the
506 corresponding SH reloc. */
507 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
508 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
509
510 static reloc_howto_type *
511 sh_coff_reloc_type_lookup (abfd, code)
512 bfd * abfd ATTRIBUTE_UNUSED;
513 bfd_reloc_code_real_type code;
514 {
515 unsigned int i;
516
517 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
518 if (sh_reloc_map[i].bfd_reloc_val == code)
519 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
520
521 (*_bfd_error_handler) (_("SH Error: unknown reloc type %d"), code);
522 return NULL;
523 }
524
525 static reloc_howto_type *
526 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
527 const char *r_name)
528 {
529 unsigned int i;
530
531 for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
532 if (sh_coff_howtos[i].name != NULL
533 && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
534 return &sh_coff_howtos[i];
535
536 return NULL;
537 }
538
539 /* This macro is used in coffcode.h to get the howto corresponding to
540 an internal reloc. */
541
542 #define RTYPE2HOWTO(relent, internal) \
543 ((relent)->howto = \
544 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
545 ? &sh_coff_howtos[(internal)->r_type] \
546 : (reloc_howto_type *) NULL))
547
548 /* This is the same as the macro in coffcode.h, except that it copies
549 r_offset into reloc_entry->addend for some relocs. */
550 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
551 { \
552 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
553 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
554 coffsym = (obj_symbols (abfd) \
555 + (cache_ptr->sym_ptr_ptr - symbols)); \
556 else if (ptr) \
557 coffsym = coff_symbol_from (abfd, ptr); \
558 if (coffsym != (coff_symbol_type *) NULL \
559 && coffsym->native->u.syment.n_scnum == 0) \
560 cache_ptr->addend = 0; \
561 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
562 && ptr->section != (asection *) NULL) \
563 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
564 else \
565 cache_ptr->addend = 0; \
566 if ((reloc).r_type == R_SH_SWITCH8 \
567 || (reloc).r_type == R_SH_SWITCH16 \
568 || (reloc).r_type == R_SH_SWITCH32 \
569 || (reloc).r_type == R_SH_USES \
570 || (reloc).r_type == R_SH_COUNT \
571 || (reloc).r_type == R_SH_ALIGN) \
572 cache_ptr->addend = (reloc).r_offset; \
573 }
574
575 /* This is the howto function for the SH relocations. */
576
577 static bfd_reloc_status_type
578 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
579 error_message)
580 bfd *abfd;
581 arelent *reloc_entry;
582 asymbol *symbol_in;
583 PTR data;
584 asection *input_section;
585 bfd *output_bfd;
586 char **error_message ATTRIBUTE_UNUSED;
587 {
588 unsigned long insn;
589 bfd_vma sym_value;
590 unsigned short r_type;
591 bfd_vma addr = reloc_entry->address;
592 bfd_byte *hit_data = addr + (bfd_byte *) data;
593
594 r_type = reloc_entry->howto->type;
595
596 if (output_bfd != NULL)
597 {
598 /* Partial linking--do nothing. */
599 reloc_entry->address += input_section->output_offset;
600 return bfd_reloc_ok;
601 }
602
603 /* Almost all relocs have to do with relaxing. If any work must be
604 done for them, it has been done in sh_relax_section. */
605 if (r_type != R_SH_IMM32
606 #ifdef COFF_WITH_PE
607 && r_type != R_SH_IMM32CE
608 && r_type != R_SH_IMAGEBASE
609 #endif
610 && (r_type != R_SH_PCDISP
611 || (symbol_in->flags & BSF_LOCAL) != 0))
612 return bfd_reloc_ok;
613
614 if (symbol_in != NULL
615 && bfd_is_und_section (symbol_in->section))
616 return bfd_reloc_undefined;
617
618 sym_value = get_symbol_value (symbol_in);
619
620 switch (r_type)
621 {
622 case R_SH_IMM32:
623 #ifdef COFF_WITH_PE
624 case R_SH_IMM32CE:
625 #endif
626 insn = bfd_get_32 (abfd, hit_data);
627 insn += sym_value + reloc_entry->addend;
628 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
629 break;
630 #ifdef COFF_WITH_PE
631 case R_SH_IMAGEBASE:
632 insn = bfd_get_32 (abfd, hit_data);
633 insn += sym_value + reloc_entry->addend;
634 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
635 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
636 break;
637 #endif
638 case R_SH_PCDISP:
639 insn = bfd_get_16 (abfd, hit_data);
640 sym_value += reloc_entry->addend;
641 sym_value -= (input_section->output_section->vma
642 + input_section->output_offset
643 + addr
644 + 4);
645 sym_value += (insn & 0xfff) << 1;
646 if (insn & 0x800)
647 sym_value -= 0x1000;
648 insn = (insn & 0xf000) | (sym_value & 0xfff);
649 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
650 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
651 return bfd_reloc_overflow;
652 break;
653 default:
654 abort ();
655 break;
656 }
657
658 return bfd_reloc_ok;
659 }
660
661 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
662
663 /* We can do relaxing. */
664 #define coff_bfd_relax_section sh_relax_section
665
666 /* We use the special COFF backend linker. */
667 #define coff_relocate_section sh_relocate_section
668
669 /* When relaxing, we need to use special code to get the relocated
670 section contents. */
671 #define coff_bfd_get_relocated_section_contents \
672 sh_coff_get_relocated_section_contents
673
674 #include "coffcode.h"
675 \f
676 /* This function handles relaxing on the SH.
677
678 Function calls on the SH look like this:
679
680 movl L1,r0
681 ...
682 jsr @r0
683 ...
684 L1:
685 .long function
686
687 The compiler and assembler will cooperate to create R_SH_USES
688 relocs on the jsr instructions. The r_offset field of the
689 R_SH_USES reloc is the PC relative offset to the instruction which
690 loads the register (the r_offset field is computed as though it
691 were a jump instruction, so the offset value is actually from four
692 bytes past the instruction). The linker can use this reloc to
693 determine just which function is being called, and thus decide
694 whether it is possible to replace the jsr with a bsr.
695
696 If multiple function calls are all based on a single register load
697 (i.e., the same function is called multiple times), the compiler
698 guarantees that each function call will have an R_SH_USES reloc.
699 Therefore, if the linker is able to convert each R_SH_USES reloc
700 which refers to that address, it can safely eliminate the register
701 load.
702
703 When the assembler creates an R_SH_USES reloc, it examines it to
704 determine which address is being loaded (L1 in the above example).
705 It then counts the number of references to that address, and
706 creates an R_SH_COUNT reloc at that address. The r_offset field of
707 the R_SH_COUNT reloc will be the number of references. If the
708 linker is able to eliminate a register load, it can use the
709 R_SH_COUNT reloc to see whether it can also eliminate the function
710 address.
711
712 SH relaxing also handles another, unrelated, matter. On the SH, if
713 a load or store instruction is not aligned on a four byte boundary,
714 the memory cycle interferes with the 32 bit instruction fetch,
715 causing a one cycle bubble in the pipeline. Therefore, we try to
716 align load and store instructions on four byte boundaries if we
717 can, by swapping them with one of the adjacent instructions. */
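
/* For illustration only (a sketch of the transformation, using the
   hypothetical call sequence above): assuming the jsr is the only
   reference counted by the R_SH_COUNT reloc at L1 and the callee is
   within bsr range, the relaxed sequence would look roughly like

	bsr	function
	...

   that is, the jsr @r0 becomes a bsr, the mov.l which loaded r0 is
   deleted, and once the R_SH_COUNT count reaches zero the .long at L1
   is deleted as well.  sh_relax_section below carries out these
   steps.  */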
718
719 static bfd_boolean
720 sh_relax_section (abfd, sec, link_info, again)
721 bfd *abfd;
722 asection *sec;
723 struct bfd_link_info *link_info;
724 bfd_boolean *again;
725 {
726 struct internal_reloc *internal_relocs;
727 bfd_boolean have_code;
728 struct internal_reloc *irel, *irelend;
729 bfd_byte *contents = NULL;
730
731 *again = FALSE;
732
733 if (link_info->relocatable
734 || (sec->flags & SEC_RELOC) == 0
735 || sec->reloc_count == 0)
736 return TRUE;
737
738 if (coff_section_data (abfd, sec) == NULL)
739 {
740 bfd_size_type amt = sizeof (struct coff_section_tdata);
741 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
742 if (sec->used_by_bfd == NULL)
743 return FALSE;
744 }
745
746 internal_relocs = (_bfd_coff_read_internal_relocs
747 (abfd, sec, link_info->keep_memory,
748 (bfd_byte *) NULL, FALSE,
749 (struct internal_reloc *) NULL));
750 if (internal_relocs == NULL)
751 goto error_return;
752
753 have_code = FALSE;
754
755 irelend = internal_relocs + sec->reloc_count;
756 for (irel = internal_relocs; irel < irelend; irel++)
757 {
758 bfd_vma laddr, paddr, symval;
759 unsigned short insn;
760 struct internal_reloc *irelfn, *irelscan, *irelcount;
761 struct internal_syment sym;
762 bfd_signed_vma foff;
763
764 if (irel->r_type == R_SH_CODE)
765 have_code = TRUE;
766
767 if (irel->r_type != R_SH_USES)
768 continue;
769
770 /* Get the section contents. */
771 if (contents == NULL)
772 {
773 if (coff_section_data (abfd, sec)->contents != NULL)
774 contents = coff_section_data (abfd, sec)->contents;
775 else
776 {
777 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
778 goto error_return;
779 }
780 }
781
782 /* The r_offset field of the R_SH_USES reloc will point us to
783 the register load. The 4 is because the r_offset field is
784 computed as though it were a jump offset, which is measured
785 from 4 bytes after the jump instruction. */
786 laddr = irel->r_vaddr - sec->vma + 4;
787 /* Careful to sign extend the 32-bit offset. */
788 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
789 if (laddr >= sec->size)
790 {
791 (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
792 abfd, (unsigned long) irel->r_vaddr);
793 continue;
794 }
795 insn = bfd_get_16 (abfd, contents + laddr);
796
797 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
798 if ((insn & 0xf000) != 0xd000)
799 {
800 ((*_bfd_error_handler)
801 ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
802 abfd, (unsigned long) irel->r_vaddr, insn));
803 continue;
804 }
805
806 /* Get the address from which the register is being loaded. The
807 displacement in the mov.l instruction is quadrupled. It is a
808 displacement from four bytes after the mov.l instruction, but,
809 before adding in the PC address, the two least significant bits
810 of the PC are cleared. We assume that the section is aligned
811 on a four byte boundary. */
812 paddr = insn & 0xff;
813 paddr *= 4;
814 paddr += (laddr + 4) &~ (bfd_vma) 3;
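      /* For example (illustrative values only): if the mov.l instruction
	 were 0xd003 and laddr were 0x102, then paddr would be
	 3 * 4 + ((0x102 + 4) & ~3) = 0xc + 0x104 = 0x110.  */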
815 if (paddr >= sec->size)
816 {
817 ((*_bfd_error_handler)
818 ("%B: 0x%lx: warning: bad R_SH_USES load offset",
819 abfd, (unsigned long) irel->r_vaddr));
820 continue;
821 }
822
823 /* Get the reloc for the address from which the register is
824 being loaded. This reloc will tell us which function is
825 actually being called. */
826 paddr += sec->vma;
827 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
828 if (irelfn->r_vaddr == paddr
829 #ifdef COFF_WITH_PE
830 && (irelfn->r_type == R_SH_IMM32
831 || irelfn->r_type == R_SH_IMM32CE
832 || irelfn->r_type == R_SH_IMAGEBASE)
833
834 #else
835 && irelfn->r_type == R_SH_IMM32
836 #endif
837 )
838 break;
839 if (irelfn >= irelend)
840 {
841 ((*_bfd_error_handler)
842 ("%B: 0x%lx: warning: could not find expected reloc",
843 abfd, (unsigned long) paddr));
844 continue;
845 }
846
847 /* Get the value of the symbol referred to by the reloc. */
848 if (! _bfd_coff_get_external_symbols (abfd))
849 goto error_return;
850 bfd_coff_swap_sym_in (abfd,
851 ((bfd_byte *) obj_coff_external_syms (abfd)
852 + (irelfn->r_symndx
853 * bfd_coff_symesz (abfd))),
854 &sym);
855 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
856 {
857 ((*_bfd_error_handler)
858 ("%B: 0x%lx: warning: symbol in unexpected section",
859 abfd, (unsigned long) paddr));
860 continue;
861 }
862
863 if (sym.n_sclass != C_EXT)
864 {
865 symval = (sym.n_value
866 - sec->vma
867 + sec->output_section->vma
868 + sec->output_offset);
869 }
870 else
871 {
872 struct coff_link_hash_entry *h;
873
874 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
875 BFD_ASSERT (h != NULL);
876 if (h->root.type != bfd_link_hash_defined
877 && h->root.type != bfd_link_hash_defweak)
878 {
879 /* This appears to be a reference to an undefined
880 symbol. Just ignore it--it will be caught by the
881 regular reloc processing. */
882 continue;
883 }
884
885 symval = (h->root.u.def.value
886 + h->root.u.def.section->output_section->vma
887 + h->root.u.def.section->output_offset);
888 }
889
890 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
891
892 /* See if this function call can be shortened. */
893 foff = (symval
894 - (irel->r_vaddr
895 - sec->vma
896 + sec->output_section->vma
897 + sec->output_offset
898 + 4));
899 if (foff < -0x1000 || foff >= 0x1000)
900 {
901 /* After all that work, we can't shorten this function call. */
902 continue;
903 }
904
905 /* Shorten the function call. */
906
907 /* For simplicity of coding, we are going to modify the section
908 contents, the section relocs, and the BFD symbol table. We
909 must tell the rest of the code not to free up this
910 information. It would be possible to instead create a table
911 of changes which have to be made, as is done in coff-mips.c;
912 that would be more work, but would require less memory when
913 the linker is run. */
914
915 coff_section_data (abfd, sec)->relocs = internal_relocs;
916 coff_section_data (abfd, sec)->keep_relocs = TRUE;
917
918 coff_section_data (abfd, sec)->contents = contents;
919 coff_section_data (abfd, sec)->keep_contents = TRUE;
920
921 obj_coff_keep_syms (abfd) = TRUE;
922
923 /* Replace the jsr with a bsr. */
924
925 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
926 replace the jsr with a bsr. */
927 irel->r_type = R_SH_PCDISP;
928 irel->r_symndx = irelfn->r_symndx;
929 if (sym.n_sclass != C_EXT)
930 {
931 /* If this needs to be changed because of future relaxing,
932 it will be handled here like other internal PCDISP
933 relocs. */
934 bfd_put_16 (abfd,
935 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
936 contents + irel->r_vaddr - sec->vma);
937 }
938 else
939 {
940 /* We can't fully resolve this yet, because the external
941 symbol value may be changed by future relaxing. We let
942 the final link phase handle it. */
943 bfd_put_16 (abfd, (bfd_vma) 0xb000,
944 contents + irel->r_vaddr - sec->vma);
945 }
946
947 /* See if there is another R_SH_USES reloc referring to the same
948 register load. */
949 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
950 if (irelscan->r_type == R_SH_USES
951 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
952 break;
953 if (irelscan < irelend)
954 {
955 /* Some other function call depends upon this register load,
956 and we have not yet converted that function call.
957 Indeed, we may never be able to convert it. There is
958 nothing else we can do at this point. */
959 continue;
960 }
961
962 /* Look for a R_SH_COUNT reloc on the location where the
963 function address is stored. Do this before deleting any
964 bytes, to avoid confusion about the address. */
965 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
966 if (irelcount->r_vaddr == paddr
967 && irelcount->r_type == R_SH_COUNT)
968 break;
969
970 /* Delete the register load. */
971 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
972 goto error_return;
973
974 /* That will change things, so, just in case it permits some
975 other function call to come within range, we should relax
976 again. Note that this is not required, and it may be slow. */
977 *again = TRUE;
978
979 /* Now check whether we got a COUNT reloc. */
980 if (irelcount >= irelend)
981 {
982 ((*_bfd_error_handler)
983 ("%B: 0x%lx: warning: could not find expected COUNT reloc",
984 abfd, (unsigned long) paddr));
985 continue;
986 }
987
988 /* The number of uses is stored in the r_offset field. We've
989 just deleted one. */
990 if (irelcount->r_offset == 0)
991 {
992 ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
993 abfd, (unsigned long) paddr));
994 continue;
995 }
996
997 --irelcount->r_offset;
998
999 /* If there are no more uses, we can delete the address. Reload
1000 the address from irelfn, in case it was changed by the
1001 previous call to sh_relax_delete_bytes. */
1002 if (irelcount->r_offset == 0)
1003 {
1004 if (! sh_relax_delete_bytes (abfd, sec,
1005 irelfn->r_vaddr - sec->vma, 4))
1006 goto error_return;
1007 }
1008
1009 /* We've done all we can with that function call. */
1010 }
1011
1012 /* Look for load and store instructions that we can align on four
1013 byte boundaries. */
1014 if (have_code)
1015 {
1016 bfd_boolean swapped;
1017
1018 /* Get the section contents. */
1019 if (contents == NULL)
1020 {
1021 if (coff_section_data (abfd, sec)->contents != NULL)
1022 contents = coff_section_data (abfd, sec)->contents;
1023 else
1024 {
1025 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1026 goto error_return;
1027 }
1028 }
1029
1030 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1031 goto error_return;
1032
1033 if (swapped)
1034 {
1035 coff_section_data (abfd, sec)->relocs = internal_relocs;
1036 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1037
1038 coff_section_data (abfd, sec)->contents = contents;
1039 coff_section_data (abfd, sec)->keep_contents = TRUE;
1040
1041 obj_coff_keep_syms (abfd) = TRUE;
1042 }
1043 }
1044
1045 if (internal_relocs != NULL
1046 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1047 {
1048 if (! link_info->keep_memory)
1049 free (internal_relocs);
1050 else
1051 coff_section_data (abfd, sec)->relocs = internal_relocs;
1052 }
1053
1054 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1055 {
1056 if (! link_info->keep_memory)
1057 free (contents);
1058 else
1059 /* Cache the section contents for coff_link_input_bfd. */
1060 coff_section_data (abfd, sec)->contents = contents;
1061 }
1062
1063 return TRUE;
1064
1065 error_return:
1066 if (internal_relocs != NULL
1067 && internal_relocs != coff_section_data (abfd, sec)->relocs)
1068 free (internal_relocs);
1069 if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1070 free (contents);
1071 return FALSE;
1072 }
1073
1074 /* Delete some bytes from a section while relaxing. */
1075
1076 static bfd_boolean
1077 sh_relax_delete_bytes (abfd, sec, addr, count)
1078 bfd *abfd;
1079 asection *sec;
1080 bfd_vma addr;
1081 int count;
1082 {
1083 bfd_byte *contents;
1084 struct internal_reloc *irel, *irelend;
1085 struct internal_reloc *irelalign;
1086 bfd_vma toaddr;
1087 bfd_byte *esym, *esymend;
1088 bfd_size_type symesz;
1089 struct coff_link_hash_entry **sym_hash;
1090 asection *o;
1091
1092 contents = coff_section_data (abfd, sec)->contents;
1093
1094 /* The deletion must stop at the next ALIGN reloc for an alignment
1095 power larger than the number of bytes we are deleting. */
1096
1097 irelalign = NULL;
1098 toaddr = sec->size;
1099
1100 irel = coff_section_data (abfd, sec)->relocs;
1101 irelend = irel + sec->reloc_count;
1102 for (; irel < irelend; irel++)
1103 {
1104 if (irel->r_type == R_SH_ALIGN
1105 && irel->r_vaddr - sec->vma > addr
1106 && count < (1 << irel->r_offset))
1107 {
1108 irelalign = irel;
1109 toaddr = irel->r_vaddr - sec->vma;
1110 break;
1111 }
1112 }
1113
1114 /* Actually delete the bytes. */
1115 memmove (contents + addr, contents + addr + count,
1116 (size_t) (toaddr - addr - count));
1117 if (irelalign == NULL)
1118 sec->size -= count;
1119 else
1120 {
1121 int i;
1122
1123 #define NOP_OPCODE (0x0009)
1124
1125 BFD_ASSERT ((count & 1) == 0);
1126 for (i = 0; i < count; i += 2)
1127 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1128 }
1129
1130 /* Adjust all the relocs. */
1131 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1132 {
1133 bfd_vma nraddr, stop;
1134 bfd_vma start = 0;
1135 int insn = 0;
1136 struct internal_syment sym;
1137 int off, adjust, oinsn;
1138 bfd_signed_vma voff = 0;
1139 bfd_boolean overflow;
1140
1141 /* Get the new reloc address. */
1142 nraddr = irel->r_vaddr - sec->vma;
1143 if ((irel->r_vaddr - sec->vma > addr
1144 && irel->r_vaddr - sec->vma < toaddr)
1145 || (irel->r_type == R_SH_ALIGN
1146 && irel->r_vaddr - sec->vma == toaddr))
1147 nraddr -= count;
1148
1149 /* See if this reloc was for the bytes we have deleted, in which
1150 case we no longer care about it. Don't delete relocs which
1151 represent addresses, though. */
1152 if (irel->r_vaddr - sec->vma >= addr
1153 && irel->r_vaddr - sec->vma < addr + count
1154 && irel->r_type != R_SH_ALIGN
1155 && irel->r_type != R_SH_CODE
1156 && irel->r_type != R_SH_DATA
1157 && irel->r_type != R_SH_LABEL)
1158 irel->r_type = R_SH_UNUSED;
1159
1160 /* If this is a PC relative reloc, see if the range it covers
1161 includes the bytes we have deleted. */
1162 switch (irel->r_type)
1163 {
1164 default:
1165 break;
1166
1167 case R_SH_PCDISP8BY2:
1168 case R_SH_PCDISP:
1169 case R_SH_PCRELIMM8BY2:
1170 case R_SH_PCRELIMM8BY4:
1171 start = irel->r_vaddr - sec->vma;
1172 insn = bfd_get_16 (abfd, contents + nraddr);
1173 break;
1174 }
1175
1176 switch (irel->r_type)
1177 {
1178 default:
1179 start = stop = addr;
1180 break;
1181
1182 case R_SH_IMM32:
1183 #ifdef COFF_WITH_PE
1184 case R_SH_IMM32CE:
1185 case R_SH_IMAGEBASE:
1186 #endif
1187 /* If this reloc is against a symbol defined in this
1188 section, and the symbol will not be adjusted below, we
1189 must check the addend to see whether it puts the value into the
1190 range being adjusted, in which case the contents must be changed. */
1191 bfd_coff_swap_sym_in (abfd,
1192 ((bfd_byte *) obj_coff_external_syms (abfd)
1193 + (irel->r_symndx
1194 * bfd_coff_symesz (abfd))),
1195 &sym);
1196 if (sym.n_sclass != C_EXT
1197 && sym.n_scnum == sec->target_index
1198 && ((bfd_vma) sym.n_value <= addr
1199 || (bfd_vma) sym.n_value >= toaddr))
1200 {
1201 bfd_vma val;
1202
1203 val = bfd_get_32 (abfd, contents + nraddr);
1204 val += sym.n_value;
1205 if (val > addr && val < toaddr)
1206 bfd_put_32 (abfd, val - count, contents + nraddr);
1207 }
1208 start = stop = addr;
1209 break;
1210
1211 case R_SH_PCDISP8BY2:
1212 off = insn & 0xff;
1213 if (off & 0x80)
1214 off -= 0x100;
1215 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1216 break;
1217
1218 case R_SH_PCDISP:
1219 bfd_coff_swap_sym_in (abfd,
1220 ((bfd_byte *) obj_coff_external_syms (abfd)
1221 + (irel->r_symndx
1222 * bfd_coff_symesz (abfd))),
1223 &sym);
1224 if (sym.n_sclass == C_EXT)
1225 start = stop = addr;
1226 else
1227 {
1228 off = insn & 0xfff;
1229 if (off & 0x800)
1230 off -= 0x1000;
1231 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1232 }
1233 break;
1234
1235 case R_SH_PCRELIMM8BY2:
1236 off = insn & 0xff;
1237 stop = start + 4 + off * 2;
1238 break;
1239
1240 case R_SH_PCRELIMM8BY4:
1241 off = insn & 0xff;
1242 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1243 break;
1244
1245 case R_SH_SWITCH8:
1246 case R_SH_SWITCH16:
1247 case R_SH_SWITCH32:
1248 /* These reloc types represent
1249 .word L2-L1
1250 The r_offset field holds the difference between the reloc
1251 address and L1. That is the start of the reloc, and
1252 adding in the contents gives us the top. We must adjust
1253 both the r_offset field and the section contents. */
1254
1255 start = irel->r_vaddr - sec->vma;
1256 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1257
1258 if (start > addr
1259 && start < toaddr
1260 && (stop <= addr || stop >= toaddr))
1261 irel->r_offset += count;
1262 else if (stop > addr
1263 && stop < toaddr
1264 && (start <= addr || start >= toaddr))
1265 irel->r_offset -= count;
1266
1267 start = stop;
1268
1269 if (irel->r_type == R_SH_SWITCH16)
1270 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1271 else if (irel->r_type == R_SH_SWITCH8)
1272 voff = bfd_get_8 (abfd, contents + nraddr);
1273 else
1274 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1275 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1276
1277 break;
1278
1279 case R_SH_USES:
1280 start = irel->r_vaddr - sec->vma;
1281 stop = (bfd_vma) ((bfd_signed_vma) start
1282 + (long) irel->r_offset
1283 + 4);
1284 break;
1285 }
1286
1287 if (start > addr
1288 && start < toaddr
1289 && (stop <= addr || stop >= toaddr))
1290 adjust = count;
1291 else if (stop > addr
1292 && stop < toaddr
1293 && (start <= addr || start >= toaddr))
1294 adjust = - count;
1295 else
1296 adjust = 0;
1297
1298 if (adjust != 0)
1299 {
1300 oinsn = insn;
1301 overflow = FALSE;
1302 switch (irel->r_type)
1303 {
1304 default:
1305 abort ();
1306 break;
1307
1308 case R_SH_PCDISP8BY2:
1309 case R_SH_PCRELIMM8BY2:
1310 insn += adjust / 2;
1311 if ((oinsn & 0xff00) != (insn & 0xff00))
1312 overflow = TRUE;
1313 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1314 break;
1315
1316 case R_SH_PCDISP:
1317 insn += adjust / 2;
1318 if ((oinsn & 0xf000) != (insn & 0xf000))
1319 overflow = TRUE;
1320 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1321 break;
1322
1323 case R_SH_PCRELIMM8BY4:
1324 BFD_ASSERT (adjust == count || count >= 4);
1325 if (count >= 4)
1326 insn += adjust / 4;
1327 else
1328 {
1329 if ((irel->r_vaddr & 3) == 0)
1330 ++insn;
1331 }
1332 if ((oinsn & 0xff00) != (insn & 0xff00))
1333 overflow = TRUE;
1334 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1335 break;
1336
1337 case R_SH_SWITCH8:
1338 voff += adjust;
1339 if (voff < 0 || voff >= 0xff)
1340 overflow = TRUE;
1341 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1342 break;
1343
1344 case R_SH_SWITCH16:
1345 voff += adjust;
1346 if (voff < - 0x8000 || voff >= 0x8000)
1347 overflow = TRUE;
1348 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1349 break;
1350
1351 case R_SH_SWITCH32:
1352 voff += adjust;
1353 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1354 break;
1355
1356 case R_SH_USES:
1357 irel->r_offset += adjust;
1358 break;
1359 }
1360
1361 if (overflow)
1362 {
1363 ((*_bfd_error_handler)
1364 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1365 abfd, (unsigned long) irel->r_vaddr));
1366 bfd_set_error (bfd_error_bad_value);
1367 return FALSE;
1368 }
1369 }
1370
1371 irel->r_vaddr = nraddr + sec->vma;
1372 }
1373
1374 /* Look through all the other sections. If they contain any IMM32
1375 relocs against internal symbols which we are not going to adjust
1376 below, we may need to adjust the addends. */
1377 for (o = abfd->sections; o != NULL; o = o->next)
1378 {
1379 struct internal_reloc *internal_relocs;
1380 struct internal_reloc *irelscan, *irelscanend;
1381 bfd_byte *ocontents;
1382
1383 if (o == sec
1384 || (o->flags & SEC_RELOC) == 0
1385 || o->reloc_count == 0)
1386 continue;
1387
1388 /* We always cache the relocs. Perhaps, if info->keep_memory is
1389 FALSE, we should free them, if we are permitted to, when we
1390 leave sh_coff_relax_section. */
1391 internal_relocs = (_bfd_coff_read_internal_relocs
1392 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1393 (struct internal_reloc *) NULL));
1394 if (internal_relocs == NULL)
1395 return FALSE;
1396
1397 ocontents = NULL;
1398 irelscanend = internal_relocs + o->reloc_count;
1399 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1400 {
1401 struct internal_syment sym;
1402
1403 #ifdef COFF_WITH_PE
1404 if (irelscan->r_type != R_SH_IMM32
1405 && irelscan->r_type != R_SH_IMAGEBASE
1406 && irelscan->r_type != R_SH_IMM32CE)
1407 #else
1408 if (irelscan->r_type != R_SH_IMM32)
1409 #endif
1410 continue;
1411
1412 bfd_coff_swap_sym_in (abfd,
1413 ((bfd_byte *) obj_coff_external_syms (abfd)
1414 + (irelscan->r_symndx
1415 * bfd_coff_symesz (abfd))),
1416 &sym);
1417 if (sym.n_sclass != C_EXT
1418 && sym.n_scnum == sec->target_index
1419 && ((bfd_vma) sym.n_value <= addr
1420 || (bfd_vma) sym.n_value >= toaddr))
1421 {
1422 bfd_vma val;
1423
1424 if (ocontents == NULL)
1425 {
1426 if (coff_section_data (abfd, o)->contents != NULL)
1427 ocontents = coff_section_data (abfd, o)->contents;
1428 else
1429 {
1430 if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1431 return FALSE;
1432 /* We always cache the section contents.
1433 Perhaps, if info->keep_memory is FALSE, we
1434 should free them, if we are permitted to,
1435 when we leave sh_coff_relax_section. */
1436 coff_section_data (abfd, o)->contents = ocontents;
1437 }
1438 }
1439
1440 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1441 val += sym.n_value;
1442 if (val > addr && val < toaddr)
1443 bfd_put_32 (abfd, val - count,
1444 ocontents + irelscan->r_vaddr - o->vma);
1445
1446 coff_section_data (abfd, o)->keep_contents = TRUE;
1447 }
1448 }
1449 }
1450
1451 /* Adjusting the internal symbols will not work if something has
1452 already retrieved the generic symbols. It would be possible to
1453 make this work by adjusting the generic symbols at the same time.
1454 However, this case should not arise in normal usage. */
1455 if (obj_symbols (abfd) != NULL
1456 || obj_raw_syments (abfd) != NULL)
1457 {
1458 ((*_bfd_error_handler)
1459 ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1460 bfd_set_error (bfd_error_invalid_operation);
1461 return FALSE;
1462 }
1463
1464 /* Adjust all the symbols. */
1465 sym_hash = obj_coff_sym_hashes (abfd);
1466 symesz = bfd_coff_symesz (abfd);
1467 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1468 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1469 while (esym < esymend)
1470 {
1471 struct internal_syment isym;
1472
1473 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1474
1475 if (isym.n_scnum == sec->target_index
1476 && (bfd_vma) isym.n_value > addr
1477 && (bfd_vma) isym.n_value < toaddr)
1478 {
1479 isym.n_value -= count;
1480
1481 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1482
1483 if (*sym_hash != NULL)
1484 {
1485 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1486 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1487 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1488 && (*sym_hash)->root.u.def.value < toaddr);
1489 (*sym_hash)->root.u.def.value -= count;
1490 }
1491 }
1492
1493 esym += (isym.n_numaux + 1) * symesz;
1494 sym_hash += isym.n_numaux + 1;
1495 }
1496
1497 /* See if we can move the ALIGN reloc forward. We have adjusted
1498 r_vaddr for it already. */
1499 if (irelalign != NULL)
1500 {
1501 bfd_vma alignto, alignaddr;
1502
1503 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1504 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1505 1 << irelalign->r_offset);
1506 if (alignto != alignaddr)
1507 {
1508 /* Tail recursion. */
1509 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1510 (int) (alignto - alignaddr));
1511 }
1512 }
1513
1514 return TRUE;
1515 }
1516 \f
1517 /* This is yet another version of the SH opcode table, used to rapidly
1518 get information about a particular instruction. */
1519
1520 /* The opcode map is represented by an array of these structures. The
1521 array is indexed by the high order four bits in the instruction. */
1522
1523 struct sh_major_opcode
1524 {
1525 /* A pointer to the instruction list. This is an array which
1526 contains all the instructions with this major opcode. */
1527 const struct sh_minor_opcode *minor_opcodes;
1528 /* The number of elements in minor_opcodes. */
1529 unsigned short count;
1530 };
1531
1532 /* This structure holds information for a set of SH opcodes. The
1533 instruction code is anded with the mask value, and the resulting
1534 value is used to search the sorted opcode list. */
1535
1536 struct sh_minor_opcode
1537 {
1538 /* The sorted opcode list. */
1539 const struct sh_opcode *opcodes;
1540 /* The number of elements in opcodes. */
1541 unsigned short count;
1542 /* The mask value to use when searching the opcode list. */
1543 unsigned short mask;
1544 };
1545
1546 /* This structure holds information for an SH instruction. An array
1547 of these structures is sorted in order by opcode. */
1548
1549 struct sh_opcode
1550 {
1551 /* The code for this instruction, after it has been anded with the
1552 mask value in the sh_major_opcode structure. */
1553 unsigned short opcode;
1554 /* Flags for this instruction. */
1555 unsigned long flags;
1556 };
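
/* As a worked example of how these tables are used (the flag values and
   opcode tables referred to are defined below): for the instruction
   0x2008 (tst rm,rn) the high order four bits are 2, so the sh_opcode2
   entry of the major opcode map is consulted.  Its single minor opcode
   entry has the mask 0xf00f; 0x2008 & 0xf00f is 0x2008, which appears in
   sh_opcode20 with the flags SETSSP | USES1 | USES2.  */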
1557
1558 /* Flags which appear in the sh_opcode structure. */
1559
1560 /* This instruction loads a value from memory. */
1561 #define LOAD (0x1)
1562
1563 /* This instruction stores a value to memory. */
1564 #define STORE (0x2)
1565
1566 /* This instruction is a branch. */
1567 #define BRANCH (0x4)
1568
1569 /* This instruction has a delay slot. */
1570 #define DELAY (0x8)
1571
1572 /* This instruction uses the value in the register in the field at
1573 mask 0x0f00 of the instruction. */
1574 #define USES1 (0x10)
1575 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1576
1577 /* This instruction uses the value in the register in the field at
1578 mask 0x00f0 of the instruction. */
1579 #define USES2 (0x20)
1580 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1581
1582 /* This instruction uses the value in register 0. */
1583 #define USESR0 (0x40)
1584
1585 /* This instruction sets the value in the register in the field at
1586 mask 0x0f00 of the instruction. */
1587 #define SETS1 (0x80)
1588 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1589
1590 /* This instruction sets the value in the register in the field at
1591 mask 0x00f0 of the instruction. */
1592 #define SETS2 (0x100)
1593 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1594
1595 /* This instruction sets register 0. */
1596 #define SETSR0 (0x200)
1597
1598 /* This instruction sets a special register. */
1599 #define SETSSP (0x400)
1600
1601 /* This instruction uses a special register. */
1602 #define USESSP (0x800)
1603
1604 /* This instruction uses the floating point register in the field at
1605 mask 0x0f00 of the instruction. */
1606 #define USESF1 (0x1000)
1607 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1608
1609 /* This instruction uses the floating point register in the field at
1610 mask 0x00f0 of the instruction. */
1611 #define USESF2 (0x2000)
1612 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1613
1614 /* This instruction uses floating point register 0. */
1615 #define USESF0 (0x4000)
1616
1617 /* This instruction sets the floating point register in the field at
1618 mask 0x0f00 of the instruction. */
1619 #define SETSF1 (0x8000)
1620 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1621
1622 #define USESAS (0x10000)
1623 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1624 #define USESR8 (0x20000)
1625 #define SETSAS (0x40000)
1626 #define SETSAS_REG(x) USESAS_REG (x)
1627
1628 #define MAP(a) a, sizeof a / sizeof a[0]
1629
1630 #ifndef COFF_IMAGE_WITH_PE
1631 static bfd_boolean sh_insn_uses_reg
1632 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1633 static bfd_boolean sh_insn_sets_reg
1634 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1635 static bfd_boolean sh_insn_uses_or_sets_reg
1636 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1637 static bfd_boolean sh_insn_uses_freg
1638 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1639 static bfd_boolean sh_insn_sets_freg
1640 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1641 static bfd_boolean sh_insn_uses_or_sets_freg
1642 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1643 static bfd_boolean sh_insns_conflict
1644 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1645 const struct sh_opcode *));
1646 static bfd_boolean sh_load_use
1647 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1648 const struct sh_opcode *));
1649
1650 /* The opcode maps. */
1651
1652 static const struct sh_opcode sh_opcode00[] =
1653 {
1654 { 0x0008, SETSSP }, /* clrt */
1655 { 0x0009, 0 }, /* nop */
1656 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1657 { 0x0018, SETSSP }, /* sett */
1658 { 0x0019, SETSSP }, /* div0u */
1659 { 0x001b, 0 }, /* sleep */
1660 { 0x0028, SETSSP }, /* clrmac */
1661 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1662 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1663 { 0x0048, SETSSP }, /* clrs */
1664 { 0x0058, SETSSP } /* sets */
1665 };
1666
1667 static const struct sh_opcode sh_opcode01[] =
1668 {
1669 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1670 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1671 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1672 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1673 { 0x0029, SETS1 | USESSP }, /* movt rn */
1674 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1675 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1676 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1677 { 0x0083, LOAD | USES1 }, /* pref @rn */
1678 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1679 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1680 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1681 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1682 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1683 };
1684
1685 static const struct sh_opcode sh_opcode02[] =
1686 {
1687 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1688 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1689 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1690 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1691 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1692 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1693 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1694 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1695 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1696 };
1697
1698 static const struct sh_minor_opcode sh_opcode0[] =
1699 {
1700 { MAP (sh_opcode00), 0xffff },
1701 { MAP (sh_opcode01), 0xf0ff },
1702 { MAP (sh_opcode02), 0xf00f }
1703 };
1704
1705 static const struct sh_opcode sh_opcode10[] =
1706 {
1707 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1708 };
1709
1710 static const struct sh_minor_opcode sh_opcode1[] =
1711 {
1712 { MAP (sh_opcode10), 0xf000 }
1713 };
1714
1715 static const struct sh_opcode sh_opcode20[] =
1716 {
1717 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1718 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1719 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1720 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1721 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1722 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1723 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1724 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1725 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1726 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1727 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1728 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1729 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1730 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1731 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1732 };
1733
1734 static const struct sh_minor_opcode sh_opcode2[] =
1735 {
1736 { MAP (sh_opcode20), 0xf00f }
1737 };
1738
1739 static const struct sh_opcode sh_opcode30[] =
1740 {
1741 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1742 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1743 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1744 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1745 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1746 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1747 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1748 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1749 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1750 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1751 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1752 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1753 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1754 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1755 };
1756
1757 static const struct sh_minor_opcode sh_opcode3[] =
1758 {
1759 { MAP (sh_opcode30), 0xf00f }
1760 };
1761
1762 static const struct sh_opcode sh_opcode40[] =
1763 {
1764 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1765 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1766 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1767 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1768 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1769 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1770 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1771 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1772 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1773 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1774 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1775 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1776 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1777 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1778 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1779 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1780 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1781 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1782 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1783 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1784 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1785 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1786 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1787 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1788 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1789 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1790 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1791 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1792 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1793 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1794 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1795 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1796 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1797 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1798 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1799 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1800 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1801 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1802 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1803 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1804 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1805 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1806 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1807 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1808 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1809 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1810 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1811 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1812 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1813 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1814 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1815 };
1816
1817 static const struct sh_opcode sh_opcode41[] =
1818 {
1819 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1820 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1821 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1822 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1823 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1824 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1825 };
1826
1827 static const struct sh_minor_opcode sh_opcode4[] =
1828 {
1829 { MAP (sh_opcode40), 0xf0ff },
1830 { MAP (sh_opcode41), 0xf00f }
1831 };
1832
1833 static const struct sh_opcode sh_opcode50[] =
1834 {
1835 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1836 };
1837
1838 static const struct sh_minor_opcode sh_opcode5[] =
1839 {
1840 { MAP (sh_opcode50), 0xf000 }
1841 };
1842
1843 static const struct sh_opcode sh_opcode60[] =
1844 {
1845 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1846 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1847 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1848 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1849 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1850 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1851 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1852 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1853 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1854 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1855 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1856 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1857 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1858 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1859 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1860 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1861 };
1862
1863 static const struct sh_minor_opcode sh_opcode6[] =
1864 {
1865 { MAP (sh_opcode60), 0xf00f }
1866 };
1867
1868 static const struct sh_opcode sh_opcode70[] =
1869 {
1870 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1871 };
1872
1873 static const struct sh_minor_opcode sh_opcode7[] =
1874 {
1875 { MAP (sh_opcode70), 0xf000 }
1876 };
1877
1878 static const struct sh_opcode sh_opcode80[] =
1879 {
1880 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1881 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1882 { 0x8200, SETSSP }, /* setrc #imm */
1883 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1884 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1885 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1886 { 0x8900, BRANCH | USESSP }, /* bt label */
1887 { 0x8b00, BRANCH | USESSP }, /* bf label */
1888 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1889 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1890 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1891 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1892 };
1893
1894 static const struct sh_minor_opcode sh_opcode8[] =
1895 {
1896 { MAP (sh_opcode80), 0xff00 }
1897 };
1898
1899 static const struct sh_opcode sh_opcode90[] =
1900 {
1901 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1902 };
1903
1904 static const struct sh_minor_opcode sh_opcode9[] =
1905 {
1906 { MAP (sh_opcode90), 0xf000 }
1907 };
1908
1909 static const struct sh_opcode sh_opcodea0[] =
1910 {
1911 { 0xa000, BRANCH | DELAY } /* bra label */
1912 };
1913
1914 static const struct sh_minor_opcode sh_opcodea[] =
1915 {
1916 { MAP (sh_opcodea0), 0xf000 }
1917 };
1918
1919 static const struct sh_opcode sh_opcodeb0[] =
1920 {
1921 { 0xb000, BRANCH | DELAY } /* bsr label */
1922 };
1923
1924 static const struct sh_minor_opcode sh_opcodeb[] =
1925 {
1926 { MAP (sh_opcodeb0), 0xf000 }
1927 };
1928
1929 static const struct sh_opcode sh_opcodec0[] =
1930 {
1931 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
1932 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
1933 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
1934 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
1935 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
1936 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
1937 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
1938 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
1939 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
1940 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
1941 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
1942 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
1943 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
1944 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
1945 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
1946 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
1947 };
1948
1949 static const struct sh_minor_opcode sh_opcodec[] =
1950 {
1951 { MAP (sh_opcodec0), 0xff00 }
1952 };
1953
1954 static const struct sh_opcode sh_opcoded0[] =
1955 {
1956 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
1957 };
1958
1959 static const struct sh_minor_opcode sh_opcoded[] =
1960 {
1961 { MAP (sh_opcoded0), 0xf000 }
1962 };
1963
1964 static const struct sh_opcode sh_opcodee0[] =
1965 {
1966 { 0xe000, SETS1 } /* mov #imm,rn */
1967 };
1968
1969 static const struct sh_minor_opcode sh_opcodee[] =
1970 {
1971 { MAP (sh_opcodee0), 0xf000 }
1972 };
1973
1974 static const struct sh_opcode sh_opcodef0[] =
1975 {
1976 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
1977 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
1978 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
1979 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
1980 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
1981 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
1982 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
1983 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
1984 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
1985 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
1986 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
1987 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
1988 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
1989 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
1990 };
1991
1992 static const struct sh_opcode sh_opcodef1[] =
1993 {
1994 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
1995 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
1996 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
1997 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
1998 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
1999 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
2000 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2001 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2002 { 0xf08d, SETSF1 }, /* fldi0 fn */
2003 { 0xf09d, SETSF1 } /* fldi1 fn */
2004 };
2005
2006 static const struct sh_minor_opcode sh_opcodef[] =
2007 {
2008 { MAP (sh_opcodef0), 0xf00f },
2009 { MAP (sh_opcodef1), 0xf0ff }
2010 };
2011
2012 static struct sh_major_opcode sh_opcodes[] =
2013 {
2014 { MAP (sh_opcode0) },
2015 { MAP (sh_opcode1) },
2016 { MAP (sh_opcode2) },
2017 { MAP (sh_opcode3) },
2018 { MAP (sh_opcode4) },
2019 { MAP (sh_opcode5) },
2020 { MAP (sh_opcode6) },
2021 { MAP (sh_opcode7) },
2022 { MAP (sh_opcode8) },
2023 { MAP (sh_opcode9) },
2024 { MAP (sh_opcodea) },
2025 { MAP (sh_opcodeb) },
2026 { MAP (sh_opcodec) },
2027 { MAP (sh_opcoded) },
2028 { MAP (sh_opcodee) },
2029 { MAP (sh_opcodef) }
2030 };
2031
2032 /* The double data transfer / parallel processing insns are not
2033 described here. This will cause sh_align_load_span to leave them alone. */
2034
2035 static const struct sh_opcode sh_dsp_opcodef0[] =
2036 {
2037 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2038 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2039 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2040 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2041 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2042 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2043 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2044 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2045 };
2046
2047 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2048 {
2049 { MAP (sh_dsp_opcodef0), 0xfc0d }
2050 };
2051
2052 /* Given an instruction, return a pointer to the corresponding
2053 sh_opcode structure. Return NULL if the instruction is not
2054 recognized. */
2055
2056 static const struct sh_opcode *
2057 sh_insn_info (insn)
2058 unsigned int insn;
2059 {
2060 const struct sh_major_opcode *maj;
2061 const struct sh_minor_opcode *min, *minend;
2062
2063 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2064 min = maj->minor_opcodes;
2065 minend = min + maj->count;
2066 for (; min < minend; min++)
2067 {
2068 unsigned int l;
2069 const struct sh_opcode *op, *opend;
2070
2071 l = insn & min->mask;
2072 op = min->opcodes;
2073 opend = op + min->count;
2074
2075 /* Since the opcode tables are sorted, we could use a binary
2076 search here if the count were above some cutoff value. */
2077 for (; op < opend; op++)
2078 if (op->opcode == l)
2079 return op;
2080 }
2081
2082 return NULL;
2083 }
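/* A minimal illustration (not part of the original source) of how the
   tables above are consulted.  The instruction word 0x2f06 is chosen
   purely as an example: it encodes "mov.l r0,@-r15", so the top nibble
   0x2 selects sh_opcode2, the minor mask 0xf00f reduces it to 0x2006,
   and that entry carries STORE | SETS1 | USES1 | USES2.  */
#if 0
static void
sh_insn_info_example (void)
{
  const struct sh_opcode *op = sh_insn_info (0x2f06);	/* mov.l r0,@-r15 */

  /* The insn is recognized and is a store, so it is a candidate for
     the load/store alignment pass below.  */
  BFD_ASSERT (op != NULL);
  BFD_ASSERT ((op->flags & (LOAD | STORE)) == STORE);
}
#endif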
2084
2085 /* See whether an instruction uses or sets a general purpose register. */
2086
2087 static bfd_boolean
2088 sh_insn_uses_or_sets_reg (insn, op, reg)
2089 unsigned int insn;
2090 const struct sh_opcode *op;
2091 unsigned int reg;
2092 {
2093 if (sh_insn_uses_reg (insn, op, reg))
2094 return TRUE;
2095
2096 return sh_insn_sets_reg (insn, op, reg);
2097 }
2098
2099 /* See whether an instruction uses a general purpose register. */
2100
2101 static bfd_boolean
2102 sh_insn_uses_reg (insn, op, reg)
2103 unsigned int insn;
2104 const struct sh_opcode *op;
2105 unsigned int reg;
2106 {
2107 unsigned int f;
2108
2109 f = op->flags;
2110
2111 if ((f & USES1) != 0
2112 && USES1_REG (insn) == reg)
2113 return TRUE;
2114 if ((f & USES2) != 0
2115 && USES2_REG (insn) == reg)
2116 return TRUE;
2117 if ((f & USESR0) != 0
2118 && reg == 0)
2119 return TRUE;
2120 if ((f & USESAS) && reg == USESAS_REG (insn))
2121 return TRUE;
2122 if ((f & USESR8) && reg == 8)
2123 return TRUE;
2124
2125 return FALSE;
2126 }
2127
2128 /* See whether an instruction sets a general purpose register. */
2129
2130 static bfd_boolean
2131 sh_insn_sets_reg (insn, op, reg)
2132 unsigned int insn;
2133 const struct sh_opcode *op;
2134 unsigned int reg;
2135 {
2136 unsigned int f;
2137
2138 f = op->flags;
2139
2140 if ((f & SETS1) != 0
2141 && SETS1_REG (insn) == reg)
2142 return TRUE;
2143 if ((f & SETS2) != 0
2144 && SETS2_REG (insn) == reg)
2145 return TRUE;
2146 if ((f & SETSR0) != 0
2147 && reg == 0)
2148 return TRUE;
2149 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2150 return TRUE;
2151
2152 return FALSE;
2153 }
2154
2155 /* See whether an instruction uses or sets a floating point register. */
2156
2157 static bfd_boolean
2158 sh_insn_uses_or_sets_freg (insn, op, reg)
2159 unsigned int insn;
2160 const struct sh_opcode *op;
2161 unsigned int reg;
2162 {
2163 if (sh_insn_uses_freg (insn, op, reg))
2164 return TRUE;
2165
2166 return sh_insn_sets_freg (insn, op, reg);
2167 }
2168
2169 /* See whether an instruction uses a floating point register. */
2170
2171 static bfd_boolean
2172 sh_insn_uses_freg (insn, op, freg)
2173 unsigned int insn;
2174 const struct sh_opcode *op;
2175 unsigned int freg;
2176 {
2177 unsigned int f;
2178
2179 f = op->flags;
2180
2181 /* We can't tell if this is a double-precision insn, so just play safe
2182 and assume that it might be. That means we must not only test FREG
2183 against itself, but also test an even FREG against FREG+1 (in case the
2184 using insn uses just the low part of a double precision value) and an
2185 odd FREG against FREG-1 (in case the setting insn sets just the low
2186 part of a double precision value).
2187 So what this all boils down to is that we have to ignore the lowest
2188 bit of the register number. */
2189
2190 if ((f & USESF1) != 0
2191 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2192 return TRUE;
2193 if ((f & USESF2) != 0
2194 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2195 return TRUE;
2196 if ((f & USESF0) != 0
2197 && freg == 0)
2198 return TRUE;
2199
2200 return FALSE;
2201 }
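/* A worked example (not from the original source) of the "ignore the
   lowest bit" rule described above.  0xf642 encodes "fmul fr4,fr6";
   because fr6/fr7 may really be the double-precision pair dr6, a query
   about fr7 must be answered the same way as one about fr6.  */
#if 0
static void
sh_uses_freg_example (void)
{
  unsigned int insn = 0xf642;	/* fmul fr4,fr6 */
  const struct sh_opcode *op = sh_insn_info (insn);

  BFD_ASSERT (sh_insn_uses_freg (insn, op, 6));
  BFD_ASSERT (sh_insn_uses_freg (insn, op, 7));	/* (7 & 0xe) == (6 & 0xe) */
  BFD_ASSERT (! sh_insn_uses_freg (insn, op, 8));
}
#endif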
2202
2203 /* See whether an instruction sets a floating point register. */
2204
2205 static bfd_boolean
2206 sh_insn_sets_freg (insn, op, freg)
2207 unsigned int insn;
2208 const struct sh_opcode *op;
2209 unsigned int freg;
2210 {
2211 unsigned int f;
2212
2213 f = op->flags;
2214
2215 /* We can't tell if this is a double-precision insn, so just play safe
2216 and assume that it might be. That means we must not only test FREG
2217 against itself, but also test an even FREG against FREG+1 (in case the
2218 using insn uses just the low part of a double precision value) and an
2219 odd FREG against FREG-1 (in case the setting insn sets just the low
2220 part of a double precision value).
2221 So what this all boils down to is that we have to ignore the lowest
2222 bit of the register number. */
2223
2224 if ((f & SETSF1) != 0
2225 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2226 return TRUE;
2227
2228 return FALSE;
2229 }
2230
2231 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2232 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2233 This should return TRUE if there is a conflict, or FALSE if the
2234 instructions can be swapped safely. */
2235
2236 static bfd_boolean
2237 sh_insns_conflict (i1, op1, i2, op2)
2238 unsigned int i1;
2239 const struct sh_opcode *op1;
2240 unsigned int i2;
2241 const struct sh_opcode *op2;
2242 {
2243 unsigned int f1, f2;
2244
2245 f1 = op1->flags;
2246 f2 = op2->flags;
2247
2248 /* Load of fpscr conflicts with floating point operations.
2249 FIXME: shouldn't test raw opcodes here. */
2250 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2251 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2252 return TRUE;
2253
2254 if ((f1 & (BRANCH | DELAY)) != 0
2255 || (f2 & (BRANCH | DELAY)) != 0)
2256 return TRUE;
2257
2258 if (((f1 | f2) & SETSSP)
2259 && (f1 & (SETSSP | USESSP))
2260 && (f2 & (SETSSP | USESSP)))
2261 return TRUE;
2262
2263 if ((f1 & SETS1) != 0
2264 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2265 return TRUE;
2266 if ((f1 & SETS2) != 0
2267 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2268 return TRUE;
2269 if ((f1 & SETSR0) != 0
2270 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2271 return TRUE;
2272 if ((f1 & SETSAS)
2273 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2274 return TRUE;
2275 if ((f1 & SETSF1) != 0
2276 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2277 return TRUE;
2278
2279 if ((f2 & SETS1) != 0
2280 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2281 return TRUE;
2282 if ((f2 & SETS2) != 0
2283 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2284 return TRUE;
2285 if ((f2 & SETSR0) != 0
2286 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2287 return TRUE;
2288 if ((f2 & SETSAS)
2289 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2290 return TRUE;
2291 if ((f2 & SETSF1) != 0
2292 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2293 return TRUE;
2294
2295 /* The instructions do not conflict. */
2296 return FALSE;
2297 }
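/* An illustrative conflict check (not part of the original source).
   "add r1,r2" followed by "mov.l @r2,r3" conflicts because the first
   insn sets r2 and the second reads it, so the pair must not be
   swapped; with an unrelated address register there is no conflict.  */
#if 0
static void
sh_insns_conflict_example (void)
{
  unsigned int i1 = 0x321c;	/* add r1,r2 */
  unsigned int i2 = 0x6322;	/* mov.l @r2,r3 */
  unsigned int i3 = 0x6542;	/* mov.l @r4,r5 */

  BFD_ASSERT (sh_insns_conflict (i1, sh_insn_info (i1), i2, sh_insn_info (i2)));
  BFD_ASSERT (! sh_insns_conflict (i1, sh_insn_info (i1), i3, sh_insn_info (i3)));
}
#endif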
2298
2299 /* I1 is a load instruction, and I2 is some other instruction. Return
2300 TRUE if I1 loads a register which I2 uses. */
2301
2302 static bfd_boolean
2303 sh_load_use (i1, op1, i2, op2)
2304 unsigned int i1;
2305 const struct sh_opcode *op1;
2306 unsigned int i2;
2307 const struct sh_opcode *op2;
2308 {
2309 unsigned int f1;
2310
2311 f1 = op1->flags;
2312
2313 if ((f1 & LOAD) == 0)
2314 return FALSE;
2315
2316 /* If both SETS1 and SETSSP are set, that means a load to a special
2317 register using postincrement addressing mode, which we don't care
2318 about here. */
2319 if ((f1 & SETS1) != 0
2320 && (f1 & SETSSP) == 0
2321 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2322 return TRUE;
2323
2324 if ((f1 & SETSR0) != 0
2325 && sh_insn_uses_reg (i2, op2, 0))
2326 return TRUE;
2327
2328 if ((f1 & SETSF1) != 0
2329 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2330 return TRUE;
2331
2332 return FALSE;
2333 }
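/* An illustrative load-use check (not part of the original source).
   After "mov.l @r1,r2", an immediately following "add r2,r3" would
   stall on the freshly loaded register, so sh_load_use reports TRUE
   and the alignment code avoids creating that sequence; an insn that
   does not touch r2 is unaffected.  */
#if 0
static void
sh_load_use_example (void)
{
  unsigned int ld = 0x6212;	/* mov.l @r1,r2 */
  unsigned int use = 0x332c;	/* add r2,r3 */
  unsigned int other = 0x354c;	/* add r4,r5 */

  BFD_ASSERT (sh_load_use (ld, sh_insn_info (ld), use, sh_insn_info (use)));
  BFD_ASSERT (! sh_load_use (ld, sh_insn_info (ld), other, sh_insn_info (other)));
}
#endif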
2334
2335 /* Try to align loads and stores within a span of memory. This is
2336 called by both the ELF and the COFF sh targets. ABFD and SEC are
2337 the BFD and section we are examining. CONTENTS is the contents of
2338 the section. SWAP is the routine to call to swap two instructions.
2339 RELOCS is a pointer to the internal relocation information, to be
2340 passed to SWAP. PLABEL is a pointer to the current label in a
2341 sorted list of labels; LABEL_END is the end of the list. START and
2342 STOP are the range of memory to examine. If a swap is made,
2343 *PSWAPPED is set to TRUE. */
2344
2345 #ifdef COFF_WITH_PE
2346 static
2347 #endif
2348 bfd_boolean
2349 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2350 plabel, label_end, start, stop, pswapped)
2351 bfd *abfd;
2352 asection *sec;
2353 bfd_byte *contents;
2354 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2355 PTR relocs;
2356 bfd_vma **plabel;
2357 bfd_vma *label_end;
2358 bfd_vma start;
2359 bfd_vma stop;
2360 bfd_boolean *pswapped;
2361 {
2362 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2363 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2364 bfd_vma i;
2365
2366 /* The SH4 has a Harvard architecture, hence aligning loads is not
2367 desirable. In fact, it is counter-productive, since it interferes
2368 with the schedules generated by the compiler. */
2369 if (abfd->arch_info->mach == bfd_mach_sh4)
2370 return TRUE;
2371
2372 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2373 instructions. */
2374 if (dsp)
2375 {
2376 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2377 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
2378 }
2379
2380 /* Instructions should be aligned on 2 byte boundaries. */
2381 if ((start & 1) == 1)
2382 ++start;
2383
2384 /* Now look through the unaligned addresses. */
2385 i = start;
2386 if ((i & 2) == 0)
2387 i += 2;
2388 for (; i < stop; i += 4)
2389 {
2390 unsigned int insn;
2391 const struct sh_opcode *op;
2392 unsigned int prev_insn = 0;
2393 const struct sh_opcode *prev_op = NULL;
2394
2395 insn = bfd_get_16 (abfd, contents + i);
2396 op = sh_insn_info (insn);
2397 if (op == NULL
2398 || (op->flags & (LOAD | STORE)) == 0)
2399 continue;
2400
2401 /* This is a load or store which is not on a four byte boundary. */
2402
2403 while (*plabel < label_end && **plabel < i)
2404 ++*plabel;
2405
2406 if (i > start)
2407 {
2408 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2409 /* If INSN is the field b of a parallel processing insn, it is not
2410 a load / store after all. Note that the test here might mistake
2411 the field_b of a pcopy insn for the starting code of a parallel
2412 processing insn; this might miss a swapping opportunity, but at
2413 least we're on the safe side. */
2414 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2415 continue;
2416
2417 /* Check if prev_insn is actually the field b of a parallel
2418 processing insn. Again, this can give a spurious match
2419 after a pcopy. */
2420 if (dsp && i - 2 > start)
2421 {
2422 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2423
2424 if ((pprev_insn & 0xfc00) == 0xf800)
2425 prev_op = NULL;
2426 else
2427 prev_op = sh_insn_info (prev_insn);
2428 }
2429 else
2430 prev_op = sh_insn_info (prev_insn);
2431
2432 /* If the load/store instruction is in a delay slot, we
2433 can't swap. */
2434 if (prev_op == NULL
2435 || (prev_op->flags & DELAY) != 0)
2436 continue;
2437 }
2438 if (i > start
2439 && (*plabel >= label_end || **plabel != i)
2440 && prev_op != NULL
2441 && (prev_op->flags & (LOAD | STORE)) == 0
2442 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2443 {
2444 bfd_boolean ok;
2445
2446 /* The load/store instruction does not have a label, and
2447 there is a previous instruction; PREV_INSN is not
2448 itself a load/store instruction, and PREV_INSN and
2449 INSN do not conflict. */
2450
2451 ok = TRUE;
2452
2453 if (i >= start + 4)
2454 {
2455 unsigned int prev2_insn;
2456 const struct sh_opcode *prev2_op;
2457
2458 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2459 prev2_op = sh_insn_info (prev2_insn);
2460
2461 /* If the instruction before PREV_INSN has a delay
2462 slot--that is, PREV_INSN is in a delay slot--we
2463 can not swap. */
2464 if (prev2_op == NULL
2465 || (prev2_op->flags & DELAY) != 0)
2466 ok = FALSE;
2467
2468 /* If the instruction before PREV_INSN is a load,
2469 and it sets a register which INSN uses, then
2470 putting INSN immediately after PREV_INSN will
2471 cause a pipeline bubble, so there is no point to
2472 making the swap. */
2473 if (ok
2474 && (prev2_op->flags & LOAD) != 0
2475 && sh_load_use (prev2_insn, prev2_op, insn, op))
2476 ok = FALSE;
2477 }
2478
2479 if (ok)
2480 {
2481 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2482 return FALSE;
2483 *pswapped = TRUE;
2484 continue;
2485 }
2486 }
2487
2488 while (*plabel < label_end && **plabel < i + 2)
2489 ++*plabel;
2490
2491 if (i + 2 < stop
2492 && (*plabel >= label_end || **plabel != i + 2))
2493 {
2494 unsigned int next_insn;
2495 const struct sh_opcode *next_op;
2496
2497 /* There is an instruction after the load/store
2498 instruction, and it does not have a label. */
2499 next_insn = bfd_get_16 (abfd, contents + i + 2);
2500 next_op = sh_insn_info (next_insn);
2501 if (next_op != NULL
2502 && (next_op->flags & (LOAD | STORE)) == 0
2503 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2504 {
2505 bfd_boolean ok;
2506
2507 /* NEXT_INSN is not itself a load/store instruction,
2508 and it does not conflict with INSN. */
2509
2510 ok = TRUE;
2511
2512 /* If PREV_INSN is a load, and it sets a register
2513 which NEXT_INSN uses, then putting NEXT_INSN
2514 immediately after PREV_INSN will cause a pipeline
2515 bubble, so there is no reason to make this swap. */
2516 if (prev_op != NULL
2517 && (prev_op->flags & LOAD) != 0
2518 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2519 ok = FALSE;
2520
2521 /* If INSN is a load, and it sets a register which
2522 the insn after NEXT_INSN uses, then doing the
2523 swap will cause a pipeline bubble, so there is no
2524 reason to make the swap. However, if the insn
2525 after NEXT_INSN is itself a load or store
2526 instruction, then it is misaligned, so
2527 optimistically hope that it will be swapped
2528 itself, and just live with the pipeline bubble if
2529 it isn't. */
2530 if (ok
2531 && i + 4 < stop
2532 && (op->flags & LOAD) != 0)
2533 {
2534 unsigned int next2_insn;
2535 const struct sh_opcode *next2_op;
2536
2537 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2538 next2_op = sh_insn_info (next2_insn);
2539 if (next2_op == NULL
2540 || ((next2_op->flags & (LOAD | STORE)) == 0
2541 && sh_load_use (insn, op, next2_insn, next2_op)))
2542 ok = FALSE;
2543 }
2544
2545 if (ok)
2546 {
2547 if (! (*swap) (abfd, sec, relocs, contents, i))
2548 return FALSE;
2549 *pswapped = TRUE;
2550 continue;
2551 }
2552 }
2553 }
2554 }
2555
2556 return TRUE;
2557 }
2558 #endif /* not COFF_IMAGE_WITH_PE */
2559
2560 /* Look for loads and stores which we can align to four byte
2561 boundaries. See the longer comment above sh_relax_section for why
2562 this is desirable. This sets *PSWAPPED if some instruction was
2563 swapped. */
2564
2565 static bfd_boolean
2566 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2567 bfd *abfd;
2568 asection *sec;
2569 struct internal_reloc *internal_relocs;
2570 bfd_byte *contents;
2571 bfd_boolean *pswapped;
2572 {
2573 struct internal_reloc *irel, *irelend;
2574 bfd_vma *labels = NULL;
2575 bfd_vma *label, *label_end;
2576 bfd_size_type amt;
2577
2578 *pswapped = FALSE;
2579
2580 irelend = internal_relocs + sec->reloc_count;
2581
2582 /* Get all the addresses with labels on them. */
2583 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2584 labels = (bfd_vma *) bfd_malloc (amt);
2585 if (labels == NULL)
2586 goto error_return;
2587 label_end = labels;
2588 for (irel = internal_relocs; irel < irelend; irel++)
2589 {
2590 if (irel->r_type == R_SH_LABEL)
2591 {
2592 *label_end = irel->r_vaddr - sec->vma;
2593 ++label_end;
2594 }
2595 }
2596
2597 /* Note that the assembler currently always outputs relocs in
2598 address order. If that ever changes, this code will need to sort
2599 the label values and the relocs. */
2600
2601 label = labels;
2602
2603 for (irel = internal_relocs; irel < irelend; irel++)
2604 {
2605 bfd_vma start, stop;
2606
2607 if (irel->r_type != R_SH_CODE)
2608 continue;
2609
2610 start = irel->r_vaddr - sec->vma;
2611
2612 for (irel++; irel < irelend; irel++)
2613 if (irel->r_type == R_SH_DATA)
2614 break;
2615 if (irel < irelend)
2616 stop = irel->r_vaddr - sec->vma;
2617 else
2618 stop = sec->size;
2619
2620 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2621 (PTR) internal_relocs, &label,
2622 label_end, start, stop, pswapped))
2623 goto error_return;
2624 }
2625
2626 free (labels);
2627
2628 return TRUE;
2629
2630 error_return:
2631 if (labels != NULL)
2632 free (labels);
2633 return FALSE;
2634 }
2635
2636 /* Swap two SH instructions. */
2637
2638 static bfd_boolean
2639 sh_swap_insns (abfd, sec, relocs, contents, addr)
2640 bfd *abfd;
2641 asection *sec;
2642 PTR relocs;
2643 bfd_byte *contents;
2644 bfd_vma addr;
2645 {
2646 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2647 unsigned short i1, i2;
2648 struct internal_reloc *irel, *irelend;
2649
2650 /* Swap the instructions themselves. */
2651 i1 = bfd_get_16 (abfd, contents + addr);
2652 i2 = bfd_get_16 (abfd, contents + addr + 2);
2653 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2654 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2655
2656 /* Adjust all reloc addresses. */
2657 irelend = internal_relocs + sec->reloc_count;
2658 for (irel = internal_relocs; irel < irelend; irel++)
2659 {
2660 int type, add;
2661
2662 /* There are a few special types of relocs that we don't want to
2663 adjust. These relocs do not apply to the instruction itself,
2664 but are only associated with the address. */
2665 type = irel->r_type;
2666 if (type == R_SH_ALIGN
2667 || type == R_SH_CODE
2668 || type == R_SH_DATA
2669 || type == R_SH_LABEL)
2670 continue;
2671
2672 /* If an R_SH_USES reloc points to one of the addresses being
2673 swapped, we must adjust it. It would be incorrect to do this
2674 for a jump, though, since we want to execute both
2675 instructions after the jump. (We have avoided swapping
2676 around a label, so the jump will not wind up executing an
2677 instruction it shouldn't). */
2678 if (type == R_SH_USES)
2679 {
2680 bfd_vma off;
2681
2682 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2683 if (off == addr)
2684 irel->r_offset += 2;
2685 else if (off == addr + 2)
2686 irel->r_offset -= 2;
2687 }
2688
2689 if (irel->r_vaddr - sec->vma == addr)
2690 {
2691 irel->r_vaddr += 2;
2692 add = -2;
2693 }
2694 else if (irel->r_vaddr - sec->vma == addr + 2)
2695 {
2696 irel->r_vaddr -= 2;
2697 add = 2;
2698 }
2699 else
2700 add = 0;
2701
2702 if (add != 0)
2703 {
2704 bfd_byte *loc;
2705 unsigned short insn, oinsn;
2706 bfd_boolean overflow;
2707
2708 loc = contents + irel->r_vaddr - sec->vma;
2709 overflow = FALSE;
2710 switch (type)
2711 {
2712 default:
2713 break;
2714
2715 case R_SH_PCDISP8BY2:
2716 case R_SH_PCRELIMM8BY2:
2717 insn = bfd_get_16 (abfd, loc);
2718 oinsn = insn;
2719 insn += add / 2;
2720 if ((oinsn & 0xff00) != (insn & 0xff00))
2721 overflow = TRUE;
2722 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2723 break;
2724
2725 case R_SH_PCDISP:
2726 insn = bfd_get_16 (abfd, loc);
2727 oinsn = insn;
2728 insn += add / 2;
2729 if ((oinsn & 0xf000) != (insn & 0xf000))
2730 overflow = TRUE;
2731 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2732 break;
2733
2734 case R_SH_PCRELIMM8BY4:
2735 /* This reloc ignores the least significant 3 bits of
2736 the program counter before adding in the offset.
2737 This means that if ADDR is at an even address, the
2738 swap will not affect the offset. If ADDR is at an
2739 odd address, then the instruction will be crossing a
2740 four byte boundary, and must be adjusted. */
2741 if ((addr & 3) != 0)
2742 {
2743 insn = bfd_get_16 (abfd, loc);
2744 oinsn = insn;
2745 insn += add / 2;
2746 if ((oinsn & 0xff00) != (insn & 0xff00))
2747 overflow = TRUE;
2748 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2749 }
2750
2751 break;
2752 }
2753
2754 if (overflow)
2755 {
2756 ((*_bfd_error_handler)
2757 ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2758 abfd, (unsigned long) irel->r_vaddr));
2759 bfd_set_error (bfd_error_bad_value);
2760 return FALSE;
2761 }
2762 }
2763 }
2764
2765 return TRUE;
2766 }
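/* Worked example of the R_SH_USES adjustment above (illustrative numbers
   only, not from the original source).  An R_SH_USES reloc at section
   offset 0x10 with r_offset 0x20 refers to the insn at 0x10 + 4 + 0x20
   == 0x34.  If sh_swap_insns is called with ADDR == 0x34, that insn
   moves to 0x36, so r_offset is bumped to 0x22; had the insn sat at
   ADDR + 2 == 0x36 instead, r_offset would be reduced by 2.  */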
2767 \f
2768 /* This is a modification of _bfd_coff_generic_relocate_section, which
2769 will handle SH relaxing. */
2770
2771 static bfd_boolean
2772 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2773 relocs, syms, sections)
2774 bfd *output_bfd ATTRIBUTE_UNUSED;
2775 struct bfd_link_info *info;
2776 bfd *input_bfd;
2777 asection *input_section;
2778 bfd_byte *contents;
2779 struct internal_reloc *relocs;
2780 struct internal_syment *syms;
2781 asection **sections;
2782 {
2783 struct internal_reloc *rel;
2784 struct internal_reloc *relend;
2785
2786 rel = relocs;
2787 relend = rel + input_section->reloc_count;
2788 for (; rel < relend; rel++)
2789 {
2790 long symndx;
2791 struct coff_link_hash_entry *h;
2792 struct internal_syment *sym;
2793 bfd_vma addend;
2794 bfd_vma val;
2795 reloc_howto_type *howto;
2796 bfd_reloc_status_type rstat;
2797
2798 /* Almost all relocs have to do with relaxing. If any work must
2799 be done for them, it has been done in sh_relax_section. */
2800 if (rel->r_type != R_SH_IMM32
2801 #ifdef COFF_WITH_PE
2802 && rel->r_type != R_SH_IMM32CE
2803 && rel->r_type != R_SH_IMAGEBASE
2804 #endif
2805 && rel->r_type != R_SH_PCDISP)
2806 continue;
2807
2808 symndx = rel->r_symndx;
2809
2810 if (symndx == -1)
2811 {
2812 h = NULL;
2813 sym = NULL;
2814 }
2815 else
2816 {
2817 if (symndx < 0
2818 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2819 {
2820 (*_bfd_error_handler)
2821 ("%B: illegal symbol index %ld in relocs",
2822 input_bfd, symndx);
2823 bfd_set_error (bfd_error_bad_value);
2824 return FALSE;
2825 }
2826 h = obj_coff_sym_hashes (input_bfd)[symndx];
2827 sym = syms + symndx;
2828 }
2829
2830 if (sym != NULL && sym->n_scnum != 0)
2831 addend = - sym->n_value;
2832 else
2833 addend = 0;
2834
2835 if (rel->r_type == R_SH_PCDISP)
2836 addend -= 4;
2837
2838 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2839 howto = NULL;
2840 else
2841 howto = &sh_coff_howtos[rel->r_type];
2842
2843 if (howto == NULL)
2844 {
2845 bfd_set_error (bfd_error_bad_value);
2846 return FALSE;
2847 }
2848
2849 #ifdef COFF_WITH_PE
2850 if (rel->r_type == R_SH_IMAGEBASE)
2851 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2852 #endif
2853
2854 val = 0;
2855
2856 if (h == NULL)
2857 {
2858 asection *sec;
2859
2860 /* There is nothing to do for an internal PCDISP reloc. */
2861 if (rel->r_type == R_SH_PCDISP)
2862 continue;
2863
2864 if (symndx == -1)
2865 {
2866 sec = bfd_abs_section_ptr;
2867 val = 0;
2868 }
2869 else
2870 {
2871 sec = sections[symndx];
2872 val = (sec->output_section->vma
2873 + sec->output_offset
2874 + sym->n_value
2875 - sec->vma);
2876 }
2877 }
2878 else
2879 {
2880 if (h->root.type == bfd_link_hash_defined
2881 || h->root.type == bfd_link_hash_defweak)
2882 {
2883 asection *sec;
2884
2885 sec = h->root.u.def.section;
2886 val = (h->root.u.def.value
2887 + sec->output_section->vma
2888 + sec->output_offset);
2889 }
2890 else if (! info->relocatable)
2891 {
2892 if (! ((*info->callbacks->undefined_symbol)
2893 (info, h->root.root.string, input_bfd, input_section,
2894 rel->r_vaddr - input_section->vma, TRUE)))
2895 return FALSE;
2896 }
2897 }
2898
2899 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2900 contents,
2901 rel->r_vaddr - input_section->vma,
2902 val, addend);
2903
2904 switch (rstat)
2905 {
2906 default:
2907 abort ();
2908 case bfd_reloc_ok:
2909 break;
2910 case bfd_reloc_overflow:
2911 {
2912 const char *name;
2913 char buf[SYMNMLEN + 1];
2914
2915 if (symndx == -1)
2916 name = "*ABS*";
2917 else if (h != NULL)
2918 name = NULL;
2919 else if (sym->_n._n_n._n_zeroes == 0
2920 && sym->_n._n_n._n_offset != 0)
2921 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2922 else
2923 {
2924 strncpy (buf, sym->_n._n_name, SYMNMLEN);
2925 buf[SYMNMLEN] = '\0';
2926 name = buf;
2927 }
2928
2929 if (! ((*info->callbacks->reloc_overflow)
2930 (info, (h ? &h->root : NULL), name, howto->name,
2931 (bfd_vma) 0, input_bfd, input_section,
2932 rel->r_vaddr - input_section->vma)))
2933 return FALSE;
2934 }
2935 }
2936 }
2937
2938 return TRUE;
2939 }
2940
2941 /* This is a version of bfd_generic_get_relocated_section_contents
2942 which uses sh_relocate_section. */
2943
2944 static bfd_byte *
2945 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2946 data, relocatable, symbols)
2947 bfd *output_bfd;
2948 struct bfd_link_info *link_info;
2949 struct bfd_link_order *link_order;
2950 bfd_byte *data;
2951 bfd_boolean relocatable;
2952 asymbol **symbols;
2953 {
2954 asection *input_section = link_order->u.indirect.section;
2955 bfd *input_bfd = input_section->owner;
2956 asection **sections = NULL;
2957 struct internal_reloc *internal_relocs = NULL;
2958 struct internal_syment *internal_syms = NULL;
2959
2960 /* We only need to handle the case of relaxing, or of having a
2961 particular set of section contents, specially. */
2962 if (relocatable
2963 || coff_section_data (input_bfd, input_section) == NULL
2964 || coff_section_data (input_bfd, input_section)->contents == NULL)
2965 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2966 link_order, data,
2967 relocatable,
2968 symbols);
2969
2970 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2971 (size_t) input_section->size);
2972
2973 if ((input_section->flags & SEC_RELOC) != 0
2974 && input_section->reloc_count > 0)
2975 {
2976 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2977 bfd_byte *esym, *esymend;
2978 struct internal_syment *isymp;
2979 asection **secpp;
2980 bfd_size_type amt;
2981
2982 if (! _bfd_coff_get_external_symbols (input_bfd))
2983 goto error_return;
2984
2985 internal_relocs = (_bfd_coff_read_internal_relocs
2986 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2987 FALSE, (struct internal_reloc *) NULL));
2988 if (internal_relocs == NULL)
2989 goto error_return;
2990
2991 amt = obj_raw_syment_count (input_bfd);
2992 amt *= sizeof (struct internal_syment);
2993 internal_syms = (struct internal_syment *) bfd_malloc (amt);
2994 if (internal_syms == NULL)
2995 goto error_return;
2996
2997 amt = obj_raw_syment_count (input_bfd);
2998 amt *= sizeof (asection *);
2999 sections = (asection **) bfd_malloc (amt);
3000 if (sections == NULL)
3001 goto error_return;
3002
3003 isymp = internal_syms;
3004 secpp = sections;
3005 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3006 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3007 while (esym < esymend)
3008 {
3009 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3010
3011 if (isymp->n_scnum != 0)
3012 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3013 else
3014 {
3015 if (isymp->n_value == 0)
3016 *secpp = bfd_und_section_ptr;
3017 else
3018 *secpp = bfd_com_section_ptr;
3019 }
3020
3021 esym += (isymp->n_numaux + 1) * symesz;
3022 secpp += isymp->n_numaux + 1;
3023 isymp += isymp->n_numaux + 1;
3024 }
3025
3026 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3027 input_section, data, internal_relocs,
3028 internal_syms, sections))
3029 goto error_return;
3030
3031 free (sections);
3032 sections = NULL;
3033 free (internal_syms);
3034 internal_syms = NULL;
3035 free (internal_relocs);
3036 internal_relocs = NULL;
3037 }
3038
3039 return data;
3040
3041 error_return:
3042 if (internal_relocs != NULL)
3043 free (internal_relocs);
3044 if (internal_syms != NULL)
3045 free (internal_syms);
3046 if (sections != NULL)
3047 free (sections);
3048 return NULL;
3049 }
3050
3051 /* The target vectors. */
3052
3053 #ifndef TARGET_SHL_SYM
3054 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3055 #endif
3056
3057 #ifdef TARGET_SHL_SYM
3058 #define TARGET_SYM TARGET_SHL_SYM
3059 #else
3060 #define TARGET_SYM shlcoff_vec
3061 #endif
3062
3063 #ifndef TARGET_SHL_NAME
3064 #define TARGET_SHL_NAME "coff-shl"
3065 #endif
3066
3067 #ifdef COFF_WITH_PE
3068 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3069 SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3070 #else
3071 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3072 0, '_', NULL, COFF_SWAP_TABLE)
3073 #endif
3074
3075 #ifndef TARGET_SHL_SYM
3076 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3077 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3078 /* Some people want versions of the SH COFF target which do not align
3079 to 16 byte boundaries. We implement that by adding a couple of new
3080 target vectors. These are just like the ones above, but they
3081 change the default section alignment. To generate them in the
3082 assembler, use -small. To use them in the linker, use -b
3083 coff-sh{l}-small and -oformat coff-sh{l}-small.
3084
3085 Yes, this is a horrible hack. A general solution for setting
3086 section alignment in COFF is rather complex. ELF handles this
3087 correctly. */
3088
3089 /* Only recognize the small versions if the target was not defaulted.
3090 Otherwise we won't recognize the non-default endianness. */
3091
3092 static const bfd_target *
3093 coff_small_object_p (abfd)
3094 bfd *abfd;
3095 {
3096 if (abfd->target_defaulted)
3097 {
3098 bfd_set_error (bfd_error_wrong_format);
3099 return NULL;
3100 }
3101 return coff_object_p (abfd);
3102 }
3103
3104 /* Set the section alignment for the small versions. */
3105
3106 static bfd_boolean
3107 coff_small_new_section_hook (abfd, section)
3108 bfd *abfd;
3109 asection *section;
3110 {
3111 if (! coff_new_section_hook (abfd, section))
3112 return FALSE;
3113
3114 /* We must align to at least a four byte boundary, because longword
3115 accesses must be on a four byte boundary. */
3116 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3117 section->alignment_power = 2;
3118
3119 return TRUE;
3120 }
3121
3122 /* This is copied from bfd_coff_std_swap_table so that we can change
3123 the default section alignment power. */
3124
3125 static bfd_coff_backend_data bfd_coff_small_swap_table =
3126 {
3127 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3128 coff_swap_aux_out, coff_swap_sym_out,
3129 coff_swap_lineno_out, coff_swap_reloc_out,
3130 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3131 coff_swap_scnhdr_out,
3132 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3133 #ifdef COFF_LONG_FILENAMES
3134 TRUE,
3135 #else
3136 FALSE,
3137 #endif
3138 COFF_DEFAULT_LONG_SECTION_NAMES,
3139 2,
3140 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3141 TRUE,
3142 #else
3143 FALSE,
3144 #endif
3145 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3146 4,
3147 #else
3148 2,
3149 #endif
3150 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3151 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3152 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3153 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3154 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3155 coff_classify_symbol, coff_compute_section_file_positions,
3156 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3157 coff_adjust_symndx, coff_link_add_one_symbol,
3158 coff_link_output_has_begun, coff_final_link_postscript,
3159 bfd_pe_print_pdata
3160 };
3161
3162 #define coff_small_close_and_cleanup \
3163 coff_close_and_cleanup
3164 #define coff_small_bfd_free_cached_info \
3165 coff_bfd_free_cached_info
3166 #define coff_small_get_section_contents \
3167 coff_get_section_contents
3168 #define coff_small_get_section_contents_in_window \
3169 coff_get_section_contents_in_window
3170
3171 extern const bfd_target shlcoff_small_vec;
3172
3173 const bfd_target shcoff_small_vec =
3174 {
3175 "coff-sh-small", /* name */
3176 bfd_target_coff_flavour,
3177 BFD_ENDIAN_BIG, /* data byte order is big */
3178 BFD_ENDIAN_BIG, /* header byte order is big */
3179
3180 (HAS_RELOC | EXEC_P | /* object flags */
3181 HAS_LINENO | HAS_DEBUG |
3182 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3183
3184 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3185 '_', /* leading symbol underscore */
3186 '/', /* ar_pad_char */
3187 15, /* ar_max_namelen */
3188 0, /* match priority. */
3189 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3190 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3191 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3192 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3193 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3194 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3195
3196 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3197 bfd_generic_archive_p, _bfd_dummy_target},
3198 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3199 bfd_false},
3200 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3201 _bfd_write_archive_contents, bfd_false},
3202
3203 BFD_JUMP_TABLE_GENERIC (coff_small),
3204 BFD_JUMP_TABLE_COPY (coff),
3205 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3206 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3207 BFD_JUMP_TABLE_SYMBOLS (coff),
3208 BFD_JUMP_TABLE_RELOCS (coff),
3209 BFD_JUMP_TABLE_WRITE (coff),
3210 BFD_JUMP_TABLE_LINK (coff),
3211 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3212
3213 & shlcoff_small_vec,
3214
3215 (PTR) &bfd_coff_small_swap_table
3216 };
3217
3218 const bfd_target shlcoff_small_vec =
3219 {
3220 "coff-shl-small", /* name */
3221 bfd_target_coff_flavour,
3222 BFD_ENDIAN_LITTLE, /* data byte order is little */
3223 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3224
3225 (HAS_RELOC | EXEC_P | /* object flags */
3226 HAS_LINENO | HAS_DEBUG |
3227 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3228
3229 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3230 '_', /* leading symbol underscore */
3231 '/', /* ar_pad_char */
3232 15, /* ar_max_namelen */
3233 0, /* match priority. */
3234 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3235 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3236 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3237 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3238 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3239 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3240
3241 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3242 bfd_generic_archive_p, _bfd_dummy_target},
3243 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3244 bfd_false},
3245 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3246 _bfd_write_archive_contents, bfd_false},
3247
3248 BFD_JUMP_TABLE_GENERIC (coff_small),
3249 BFD_JUMP_TABLE_COPY (coff),
3250 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3251 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3252 BFD_JUMP_TABLE_SYMBOLS (coff),
3253 BFD_JUMP_TABLE_RELOCS (coff),
3254 BFD_JUMP_TABLE_WRITE (coff),
3255 BFD_JUMP_TABLE_LINK (coff),
3256 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3257
3258 & shcoff_small_vec,
3259
3260 (PTR) &bfd_coff_small_swap_table
3261 };
3262 #endif