1 /* BFD back-end for Hitachi Super-H COFF binaries.
2 Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
3 Free Software Foundation, Inc.
4 Contributed by Cygnus Support.
5 Written by Steve Chamberlain, <sac@cygnus.com>.
6 Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7
8 This file is part of BFD, the Binary File Descriptor library.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24 #include "bfd.h"
25 #include "sysdep.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31
32 #ifdef COFF_WITH_PE
33 #include "coff/pe.h"
34
35 #ifndef COFF_IMAGE_WITH_PE
36 static bfd_boolean sh_align_load_span
37 PARAMS ((bfd *, asection *, bfd_byte *,
38 bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
39 PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
40
41 #define _bfd_sh_align_load_span sh_align_load_span
42 #endif
43 #endif
44
45 #include "libcoff.h"
46
47 /* Internal functions. */
48 static bfd_reloc_status_type sh_reloc
49 PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
50 static long get_symbol_value PARAMS ((asymbol *));
51 static bfd_boolean sh_relax_section
52 PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
53 static bfd_boolean sh_relax_delete_bytes
54 PARAMS ((bfd *, asection *, bfd_vma, int));
55 #ifndef COFF_IMAGE_WITH_PE
56 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
57 #endif
58 static bfd_boolean sh_align_loads
59 PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
60 bfd_boolean *));
61 static bfd_boolean sh_swap_insns
62 PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
63 static bfd_boolean sh_relocate_section
64 PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
65 struct internal_reloc *, struct internal_syment *, asection **));
66 static bfd_byte *sh_coff_get_relocated_section_contents
67 PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
68 bfd_byte *, bfd_boolean, asymbol **));
69 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
70
71 #ifdef COFF_WITH_PE
72 /* Can't build import tables with 2**4 alignment. */
73 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
74 #else
75 /* Default section alignment to 2**4. */
76 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
77 #endif
78
79 #ifdef COFF_IMAGE_WITH_PE
80 /* Align PE executables. */
81 #define COFF_PAGE_SIZE 0x1000
82 #endif
83
84 /* Generate long file names. */
85 #define COFF_LONG_FILENAMES
86
87 #ifdef COFF_WITH_PE
88 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
89 /* Return TRUE if this relocation should
90 appear in the output .reloc section. */
91 static bfd_boolean in_reloc_p (abfd, howto)
92 bfd * abfd ATTRIBUTE_UNUSED;
93 reloc_howto_type * howto;
94 {
95 return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
96 }
97 #endif
98
99 /* The supported relocations. There are a lot of relocations defined
100 in coff/internal.h which we do not expect to ever see. */
101 static reloc_howto_type sh_coff_howtos[] =
102 {
103 EMPTY_HOWTO (0),
104 EMPTY_HOWTO (1),
105 #ifdef COFF_WITH_PE
106 /* Windows CE */
107 HOWTO (R_SH_IMM32CE, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield, /* complain_on_overflow */
114 sh_reloc, /* special_function */
115 "r_imm32ce", /* name */
116 TRUE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120 #else
121 EMPTY_HOWTO (2),
122 #endif
123 EMPTY_HOWTO (3), /* R_SH_PCREL8 */
124 EMPTY_HOWTO (4), /* R_SH_PCREL16 */
125 EMPTY_HOWTO (5), /* R_SH_HIGH8 */
126 EMPTY_HOWTO (6), /* R_SH_IMM24 */
127 EMPTY_HOWTO (7), /* R_SH_LOW16 */
128 EMPTY_HOWTO (8),
129 EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
130
131 HOWTO (R_SH_PCDISP8BY2, /* type */
132 1, /* rightshift */
133 1, /* size (0 = byte, 1 = short, 2 = long) */
134 8, /* bitsize */
135 TRUE, /* pc_relative */
136 0, /* bitpos */
137 complain_overflow_signed, /* complain_on_overflow */
138 sh_reloc, /* special_function */
139 "r_pcdisp8by2", /* name */
140 TRUE, /* partial_inplace */
141 0xff, /* src_mask */
142 0xff, /* dst_mask */
143 TRUE), /* pcrel_offset */
144
145 EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
146
147 HOWTO (R_SH_PCDISP, /* type */
148 1, /* rightshift */
149 1, /* size (0 = byte, 1 = short, 2 = long) */
150 12, /* bitsize */
151 TRUE, /* pc_relative */
152 0, /* bitpos */
153 complain_overflow_signed, /* complain_on_overflow */
154 sh_reloc, /* special_function */
155 "r_pcdisp12by2", /* name */
156 TRUE, /* partial_inplace */
157 0xfff, /* src_mask */
158 0xfff, /* dst_mask */
159 TRUE), /* pcrel_offset */
160
161 EMPTY_HOWTO (13),
162
163 HOWTO (R_SH_IMM32, /* type */
164 0, /* rightshift */
165 2, /* size (0 = byte, 1 = short, 2 = long) */
166 32, /* bitsize */
167 FALSE, /* pc_relative */
168 0, /* bitpos */
169 complain_overflow_bitfield, /* complain_on_overflow */
170 sh_reloc, /* special_function */
171 "r_imm32", /* name */
172 TRUE, /* partial_inplace */
173 0xffffffff, /* src_mask */
174 0xffffffff, /* dst_mask */
175 FALSE), /* pcrel_offset */
176
177 EMPTY_HOWTO (15),
178 #ifdef COFF_WITH_PE
179 HOWTO (R_SH_IMAGEBASE, /* type */
180 0, /* rightshift */
181 2, /* size (0 = byte, 1 = short, 2 = long) */
182 32, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield, /* complain_on_overflow */
186 sh_reloc, /* special_function */
187 "rva32", /* name */
188 TRUE, /* partial_inplace */
189 0xffffffff, /* src_mask */
190 0xffffffff, /* dst_mask */
191 FALSE), /* pcrel_offset */
192 #else
193 EMPTY_HOWTO (16), /* R_SH_IMM8 */
194 #endif
195 EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
196 EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
197 EMPTY_HOWTO (19), /* R_SH_IMM4 */
198 EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
199 EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
200
201 HOWTO (R_SH_PCRELIMM8BY2, /* type */
202 1, /* rightshift */
203 1, /* size (0 = byte, 1 = short, 2 = long) */
204 8, /* bitsize */
205 TRUE, /* pc_relative */
206 0, /* bitpos */
207 complain_overflow_unsigned, /* complain_on_overflow */
208 sh_reloc, /* special_function */
209 "r_pcrelimm8by2", /* name */
210 TRUE, /* partial_inplace */
211 0xff, /* src_mask */
212 0xff, /* dst_mask */
213 TRUE), /* pcrel_offset */
214
215 HOWTO (R_SH_PCRELIMM8BY4, /* type */
216 2, /* rightshift */
217 1, /* size (0 = byte, 1 = short, 2 = long) */
218 8, /* bitsize */
219 TRUE, /* pc_relative */
220 0, /* bitpos */
221 complain_overflow_unsigned, /* complain_on_overflow */
222 sh_reloc, /* special_function */
223 "r_pcrelimm8by4", /* name */
224 TRUE, /* partial_inplace */
225 0xff, /* src_mask */
226 0xff, /* dst_mask */
227 TRUE), /* pcrel_offset */
228
229 HOWTO (R_SH_IMM16, /* type */
230 0, /* rightshift */
231 1, /* size (0 = byte, 1 = short, 2 = long) */
232 16, /* bitsize */
233 FALSE, /* pc_relative */
234 0, /* bitpos */
235 complain_overflow_bitfield, /* complain_on_overflow */
236 sh_reloc, /* special_function */
237 "r_imm16", /* name */
238 TRUE, /* partial_inplace */
239 0xffff, /* src_mask */
240 0xffff, /* dst_mask */
241 FALSE), /* pcrel_offset */
242
243 HOWTO (R_SH_SWITCH16, /* type */
244 0, /* rightshift */
245 1, /* size (0 = byte, 1 = short, 2 = long) */
246 16, /* bitsize */
247 FALSE, /* pc_relative */
248 0, /* bitpos */
249 complain_overflow_bitfield, /* complain_on_overflow */
250 sh_reloc, /* special_function */
251 "r_switch16", /* name */
252 TRUE, /* partial_inplace */
253 0xffff, /* src_mask */
254 0xffff, /* dst_mask */
255 FALSE), /* pcrel_offset */
256
257 HOWTO (R_SH_SWITCH32, /* type */
258 0, /* rightshift */
259 2, /* size (0 = byte, 1 = short, 2 = long) */
260 32, /* bitsize */
261 FALSE, /* pc_relative */
262 0, /* bitpos */
263 complain_overflow_bitfield, /* complain_on_overflow */
264 sh_reloc, /* special_function */
265 "r_switch32", /* name */
266 TRUE, /* partial_inplace */
267 0xffffffff, /* src_mask */
268 0xffffffff, /* dst_mask */
269 FALSE), /* pcrel_offset */
270
271 HOWTO (R_SH_USES, /* type */
272 0, /* rightshift */
273 1, /* size (0 = byte, 1 = short, 2 = long) */
274 16, /* bitsize */
275 FALSE, /* pc_relative */
276 0, /* bitpos */
277 complain_overflow_bitfield, /* complain_on_overflow */
278 sh_reloc, /* special_function */
279 "r_uses", /* name */
280 TRUE, /* partial_inplace */
281 0xffff, /* src_mask */
282 0xffff, /* dst_mask */
283 FALSE), /* pcrel_offset */
284
285 HOWTO (R_SH_COUNT, /* type */
286 0, /* rightshift */
287 2, /* size (0 = byte, 1 = short, 2 = long) */
288 32, /* bitsize */
289 FALSE, /* pc_relative */
290 0, /* bitpos */
291 complain_overflow_bitfield, /* complain_on_overflow */
292 sh_reloc, /* special_function */
293 "r_count", /* name */
294 TRUE, /* partial_inplace */
295 0xffffffff, /* src_mask */
296 0xffffffff, /* dst_mask */
297 FALSE), /* pcrel_offset */
298
299 HOWTO (R_SH_ALIGN, /* type */
300 0, /* rightshift */
301 2, /* size (0 = byte, 1 = short, 2 = long) */
302 32, /* bitsize */
303 FALSE, /* pc_relative */
304 0, /* bitpos */
305 complain_overflow_bitfield, /* complain_on_overflow */
306 sh_reloc, /* special_function */
307 "r_align", /* name */
308 TRUE, /* partial_inplace */
309 0xffffffff, /* src_mask */
310 0xffffffff, /* dst_mask */
311 FALSE), /* pcrel_offset */
312
313 HOWTO (R_SH_CODE, /* type */
314 0, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 32, /* bitsize */
317 FALSE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_bitfield, /* complain_on_overflow */
320 sh_reloc, /* special_function */
321 "r_code", /* name */
322 TRUE, /* partial_inplace */
323 0xffffffff, /* src_mask */
324 0xffffffff, /* dst_mask */
325 FALSE), /* pcrel_offset */
326
327 HOWTO (R_SH_DATA, /* type */
328 0, /* rightshift */
329 2, /* size (0 = byte, 1 = short, 2 = long) */
330 32, /* bitsize */
331 FALSE, /* pc_relative */
332 0, /* bitpos */
333 complain_overflow_bitfield, /* complain_on_overflow */
334 sh_reloc, /* special_function */
335 "r_data", /* name */
336 TRUE, /* partial_inplace */
337 0xffffffff, /* src_mask */
338 0xffffffff, /* dst_mask */
339 FALSE), /* pcrel_offset */
340
341 HOWTO (R_SH_LABEL, /* type */
342 0, /* rightshift */
343 2, /* size (0 = byte, 1 = short, 2 = long) */
344 32, /* bitsize */
345 FALSE, /* pc_relative */
346 0, /* bitpos */
347 complain_overflow_bitfield, /* complain_on_overflow */
348 sh_reloc, /* special_function */
349 "r_label", /* name */
350 TRUE, /* partial_inplace */
351 0xffffffff, /* src_mask */
352 0xffffffff, /* dst_mask */
353 FALSE), /* pcrel_offset */
354
355 HOWTO (R_SH_SWITCH8, /* type */
356 0, /* rightshift */
357 0, /* size (0 = byte, 1 = short, 2 = long) */
358 8, /* bitsize */
359 FALSE, /* pc_relative */
360 0, /* bitpos */
361 complain_overflow_bitfield, /* complain_on_overflow */
362 sh_reloc, /* special_function */
363 "r_switch8", /* name */
364 TRUE, /* partial_inplace */
365 0xff, /* src_mask */
366 0xff, /* dst_mask */
367 FALSE) /* pcrel_offset */
368 };
369
370 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
371
372 /* Check for a bad magic number. */
373 #define BADMAG(x) SHBADMAG(x)
374
375 /* Customize coffcode.h (this is not currently used). */
376 #define SH 1
377
378 /* FIXME: This should not be set here. */
379 #define __A_MAGIC_SET__
380
381 #ifndef COFF_WITH_PE
382 /* Swap the r_offset field in and out. */
383 #define SWAP_IN_RELOC_OFFSET H_GET_32
384 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
385
386 /* Swap out extra information in the reloc structure. */
387 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
388 do \
389 { \
390 dst->r_stuff[0] = 'S'; \
391 dst->r_stuff[1] = 'C'; \
392 } \
393 while (0)
394 #endif
395
396 /* Get the value of a symbol, when performing a relocation. */
397
398 static long
399 get_symbol_value (symbol)
400 asymbol *symbol;
401 {
402 bfd_vma relocation;
403
404 if (bfd_is_com_section (symbol->section))
405 relocation = 0;
406 else
407 relocation = (symbol->value +
408 symbol->section->output_section->vma +
409 symbol->section->output_offset);
410
411 return relocation;
412 }
413
414 #ifdef COFF_WITH_PE
415 /* Convert an rtype to howto for the COFF backend linker.
416 Copied from coff-i386. */
417 #define coff_rtype_to_howto coff_sh_rtype_to_howto
418 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
419
420 static reloc_howto_type *
421 coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
422 bfd * abfd ATTRIBUTE_UNUSED;
423 asection * sec;
424 struct internal_reloc * rel;
425 struct coff_link_hash_entry * h;
426 struct internal_syment * sym;
427 bfd_vma * addendp;
428 {
429 reloc_howto_type * howto;
430
431 howto = sh_coff_howtos + rel->r_type;
432
433 *addendp = 0;
434
435 if (howto->pc_relative)
436 *addendp += sec->vma;
437
438 if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
439 {
440 /* This is a common symbol. The section contents include the
441 size (sym->n_value) as an addend. The relocate_section
442 function will be adding in the final value of the symbol. We
443 need to subtract out the current size in order to get the
444 correct result. */
445 BFD_ASSERT (h != NULL);
446 }
447
448 if (howto->pc_relative)
449 {
450 *addendp -= 4;
451
452 /* If the symbol is defined, then the generic code is going to
453 add back the symbol value in order to cancel out an
454 adjustment it made to the addend. However, we set the addend
455 to 0 at the start of this function. We need to adjust here,
456 to avoid the adjustment the generic code will make. FIXME:
457 This is getting a bit hackish. */
458 if (sym != NULL && sym->n_scnum != 0)
459 *addendp -= sym->n_value;
460 }
461
462 if (rel->r_type == R_SH_IMAGEBASE)
463 *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
464
465 return howto;
466 }
467
468 #endif /* COFF_WITH_PE */
469
470 /* This structure is used to map BFD reloc codes to SH PE relocs. */
471 struct shcoff_reloc_map
472 {
473 bfd_reloc_code_real_type bfd_reloc_val;
474 unsigned char shcoff_reloc_val;
475 };
476
477 #ifdef COFF_WITH_PE
478 /* An array mapping BFD reloc codes to SH PE relocs. */
479 static const struct shcoff_reloc_map sh_reloc_map[] =
480 {
481 { BFD_RELOC_32, R_SH_IMM32CE },
482 { BFD_RELOC_RVA, R_SH_IMAGEBASE },
483 { BFD_RELOC_CTOR, R_SH_IMM32CE },
484 };
485 #else
486 /* An array mapping BFD reloc codes to SH PE relocs. */
487 static const struct shcoff_reloc_map sh_reloc_map[] =
488 {
489 { BFD_RELOC_32, R_SH_IMM32 },
490 { BFD_RELOC_CTOR, R_SH_IMM32 },
491 };
492 #endif
493
494 /* Given a BFD reloc code, return the howto structure for the
495 corresponding SH PE reloc. */
496 #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
497
498 static reloc_howto_type *
499 sh_coff_reloc_type_lookup (abfd, code)
500 bfd * abfd ATTRIBUTE_UNUSED;
501 bfd_reloc_code_real_type code;
502 {
503 unsigned int i;
504
505 for (i = ARRAY_SIZE (sh_reloc_map); i--;)
506 if (sh_reloc_map[i].bfd_reloc_val == code)
507 return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
508
509 fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
510 return NULL;
511 }
512
513 /* This macro is used in coffcode.h to get the howto corresponding to
514 an internal reloc. */
515
516 #define RTYPE2HOWTO(relent, internal) \
517 ((relent)->howto = \
518 ((internal)->r_type < SH_COFF_HOWTO_COUNT \
519 ? &sh_coff_howtos[(internal)->r_type] \
520 : (reloc_howto_type *) NULL))
521
522 /* This is the same as the macro in coffcode.h, except that it copies
523 r_offset into reloc_entry->addend for some relocs. */
524 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
525 { \
526 coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
527 if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
528 coffsym = (obj_symbols (abfd) \
529 + (cache_ptr->sym_ptr_ptr - symbols)); \
530 else if (ptr) \
531 coffsym = coff_symbol_from (abfd, ptr); \
532 if (coffsym != (coff_symbol_type *) NULL \
533 && coffsym->native->u.syment.n_scnum == 0) \
534 cache_ptr->addend = 0; \
535 else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
536 && ptr->section != (asection *) NULL) \
537 cache_ptr->addend = - (ptr->section->vma + ptr->value); \
538 else \
539 cache_ptr->addend = 0; \
540 if ((reloc).r_type == R_SH_SWITCH8 \
541 || (reloc).r_type == R_SH_SWITCH16 \
542 || (reloc).r_type == R_SH_SWITCH32 \
543 || (reloc).r_type == R_SH_USES \
544 || (reloc).r_type == R_SH_COUNT \
545 || (reloc).r_type == R_SH_ALIGN) \
546 cache_ptr->addend = (reloc).r_offset; \
547 }
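
/* For the SWITCH, USES, COUNT and ALIGN relocs, r_offset does not hold
   an ordinary addend but auxiliary data used by the relaxation code (a
   computed difference, the offset to the register load, a use count,
   or an alignment power), so it is carried through in the addend here. */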
548
549 /* This is the howto function for the SH relocations. */
550
551 static bfd_reloc_status_type
552 sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
553 error_message)
554 bfd *abfd;
555 arelent *reloc_entry;
556 asymbol *symbol_in;
557 PTR data;
558 asection *input_section;
559 bfd *output_bfd;
560 char **error_message ATTRIBUTE_UNUSED;
561 {
562 unsigned long insn;
563 bfd_vma sym_value;
564 unsigned short r_type;
565 bfd_vma addr = reloc_entry->address;
566 bfd_byte *hit_data = addr + (bfd_byte *) data;
567
568 r_type = reloc_entry->howto->type;
569
570 if (output_bfd != NULL)
571 {
572 /* Partial linking--do nothing. */
573 reloc_entry->address += input_section->output_offset;
574 return bfd_reloc_ok;
575 }
576
577 /* Almost all relocs have to do with relaxing. If any work must be
578 done for them, it has been done in sh_relax_section. */
579 if (r_type != R_SH_IMM32
580 #ifdef COFF_WITH_PE
581 && r_type != R_SH_IMM32CE
582 && r_type != R_SH_IMAGEBASE
583 #endif
584 && (r_type != R_SH_PCDISP
585 || (symbol_in->flags & BSF_LOCAL) != 0))
586 return bfd_reloc_ok;
587
588 if (symbol_in != NULL
589 && bfd_is_und_section (symbol_in->section))
590 return bfd_reloc_undefined;
591
592 sym_value = get_symbol_value (symbol_in);
593
594 switch (r_type)
595 {
596 case R_SH_IMM32:
597 #ifdef COFF_WITH_PE
598 case R_SH_IMM32CE:
599 #endif
600 insn = bfd_get_32 (abfd, hit_data);
601 insn += sym_value + reloc_entry->addend;
602 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
603 break;
604 #ifdef COFF_WITH_PE
605 case R_SH_IMAGEBASE:
606 insn = bfd_get_32 (abfd, hit_data);
607 insn += sym_value + reloc_entry->addend;
608 insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
609 bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
610 break;
611 #endif
612 case R_SH_PCDISP:
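/* Adjust the 12-bit PC-relative displacement of a bra/bsr: the
   displacement already encoded in the instruction and the addend are
   folded into sym_value (with sign extension), the PC (four bytes past
   the instruction) is subtracted, and the result is written back into
   the low twelve bits of the opcode; an out-of-range result yields
   bfd_reloc_overflow. */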
613 insn = bfd_get_16 (abfd, hit_data);
614 sym_value += reloc_entry->addend;
615 sym_value -= (input_section->output_section->vma
616 + input_section->output_offset
617 + addr
618 + 4);
619 sym_value += (insn & 0xfff) << 1;
620 if (insn & 0x800)
621 sym_value -= 0x1000;
622 insn = (insn & 0xf000) | (sym_value & 0xfff);
623 bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
624 if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
625 return bfd_reloc_overflow;
626 break;
627 default:
628 abort ();
629 break;
630 }
631
632 return bfd_reloc_ok;
633 }
634
635 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
636
637 /* We can do relaxing. */
638 #define coff_bfd_relax_section sh_relax_section
639
640 /* We use the special COFF backend linker. */
641 #define coff_relocate_section sh_relocate_section
642
643 /* When relaxing, we need to use special code to get the relocated
644 section contents. */
645 #define coff_bfd_get_relocated_section_contents \
646 sh_coff_get_relocated_section_contents
647
648 #include "coffcode.h"
649 \f
650 /* This function handles relaxing on the SH.
651
652 Function calls on the SH look like this:
653
654 mov.l L1,r0
655 ...
656 jsr @r0
657 ...
658 L1:
659 .long function
660
661 The compiler and assembler will cooperate to create R_SH_USES
662 relocs on the jsr instructions. The r_offset field of the
663 R_SH_USES reloc is the PC relative offset to the instruction which
664 loads the register (the r_offset field is computed as though it
665 were a jump instruction, so the offset value is actually from four
666 bytes past the instruction). The linker can use this reloc to
667 determine just which function is being called, and thus decide
668 whether it is possible to replace the jsr with a bsr.
669
670 If multiple function calls are all based on a single register load
671 (i.e., the same function is called multiple times), the compiler
672 guarantees that each function call will have an R_SH_USES reloc.
673 Therefore, if the linker is able to convert each R_SH_USES reloc
674 which refers to that address, it can safely eliminate the register
675 load.
676
677 When the assembler creates an R_SH_USES reloc, it examines it to
678 determine which address is being loaded (L1 in the above example).
679 It then counts the number of references to that address, and
680 creates an R_SH_COUNT reloc at that address. The r_offset field of
681 the R_SH_COUNT reloc will be the number of references. If the
682 linker is able to eliminate a register load, it can use the
683 R_SH_COUNT reloc to see whether it can also eliminate the function
684 address.
685
686 SH relaxing also handles another, unrelated, matter. On the SH, if
687 a load or store instruction is not aligned on a four byte boundary,
688 the memory cycle interferes with the 32 bit instruction fetch,
689 causing a one cycle bubble in the pipeline. Therefore, we try to
690 align load and store instructions on four byte boundaries if we
691 can, by swapping them with one of the adjacent instructions. */
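
/* Illustrative sketch of the first transformation: once every
   R_SH_USES reference to L1 above has been converted and the
   R_SH_COUNT reloc for L1 drops to zero, the sequence becomes simply

       bsr function
       ...

   with the mov.l instruction and the L1 constant deleted by
   sh_relax_delete_bytes. */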
692
693 static bfd_boolean
694 sh_relax_section (abfd, sec, link_info, again)
695 bfd *abfd;
696 asection *sec;
697 struct bfd_link_info *link_info;
698 bfd_boolean *again;
699 {
700 struct internal_reloc *internal_relocs;
701 struct internal_reloc *free_relocs = NULL;
702 bfd_boolean have_code;
703 struct internal_reloc *irel, *irelend;
704 bfd_byte *contents = NULL;
705 bfd_byte *free_contents = NULL;
706
707 *again = FALSE;
708
709 if (link_info->relocateable
710 || (sec->flags & SEC_RELOC) == 0
711 || sec->reloc_count == 0)
712 return TRUE;
713
714 /* If this is the first time we have been called for this section,
715 initialize the cooked size. */
716 if (sec->_cooked_size == 0)
717 sec->_cooked_size = sec->_raw_size;
718
719 internal_relocs = (_bfd_coff_read_internal_relocs
720 (abfd, sec, link_info->keep_memory,
721 (bfd_byte *) NULL, FALSE,
722 (struct internal_reloc *) NULL));
723 if (internal_relocs == NULL)
724 goto error_return;
725 if (! link_info->keep_memory)
726 free_relocs = internal_relocs;
727
728 have_code = FALSE;
729
730 irelend = internal_relocs + sec->reloc_count;
731 for (irel = internal_relocs; irel < irelend; irel++)
732 {
733 bfd_vma laddr, paddr, symval;
734 unsigned short insn;
735 struct internal_reloc *irelfn, *irelscan, *irelcount;
736 struct internal_syment sym;
737 bfd_signed_vma foff;
738
739 if (irel->r_type == R_SH_CODE)
740 have_code = TRUE;
741
742 if (irel->r_type != R_SH_USES)
743 continue;
744
745 /* Get the section contents. */
746 if (contents == NULL)
747 {
748 if (coff_section_data (abfd, sec) != NULL
749 && coff_section_data (abfd, sec)->contents != NULL)
750 contents = coff_section_data (abfd, sec)->contents;
751 else
752 {
753 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
754 if (contents == NULL)
755 goto error_return;
756 free_contents = contents;
757
758 if (! bfd_get_section_contents (abfd, sec, contents,
759 (file_ptr) 0, sec->_raw_size))
760 goto error_return;
761 }
762 }
763
764 /* The r_offset field of the R_SH_USES reloc will point us to
765 the register load. The 4 is because the r_offset field is
766 computed as though it were a jump offset, and jump offsets are
767 reckoned from 4 bytes after the jump instruction. */
768 laddr = irel->r_vaddr - sec->vma + 4;
769 /* Careful to sign extend the 32-bit offset. */
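/* The (x ^ 0x80000000) - 0x80000000 idiom sign extends the low 32
   bits of r_offset to the full width of a bfd_vma. */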
770 laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
771 if (laddr >= sec->_raw_size)
772 {
773 (*_bfd_error_handler) ("%s: 0x%lx: warning: bad R_SH_USES offset",
774 bfd_archive_filename (abfd),
775 (unsigned long) irel->r_vaddr);
776 continue;
777 }
778 insn = bfd_get_16 (abfd, contents + laddr);
779
780 /* If the instruction is not mov.l NN,rN, we don't know what to do. */
781 if ((insn & 0xf000) != 0xd000)
782 {
783 ((*_bfd_error_handler)
784 ("%s: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
785 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr, insn));
786 continue;
787 }
788
789 /* Get the address from which the register is being loaded. The
790 displacement in the mov.l instruction is quadrupled. It is a
791 displacement from four bytes after the mov.l instruction, but,
792 before adding in the PC address, the two least significant bits
793 of the PC are cleared. We assume that the section is aligned
794 on a four byte boundary. */
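/* For example (illustrative), a mov.l with an encoded displacement of
   1 at section offset 0x100 (instruction 0xd001) gives
   paddr = 1 * 4 + ((0x100 + 4) & ~3) = 0x108. */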
795 paddr = insn & 0xff;
796 paddr *= 4;
797 paddr += (laddr + 4) &~ (bfd_vma) 3;
798 if (paddr >= sec->_raw_size)
799 {
800 ((*_bfd_error_handler)
801 ("%s: 0x%lx: warning: bad R_SH_USES load offset",
802 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
803 continue;
804 }
805
806 /* Get the reloc for the address from which the register is
807 being loaded. This reloc will tell us which function is
808 actually being called. */
809 paddr += sec->vma;
810 for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
811 if (irelfn->r_vaddr == paddr
812 #ifdef COFF_WITH_PE
813 && (irelfn->r_type == R_SH_IMM32
814 || irelfn->r_type == R_SH_IMM32CE
815 || irelfn->r_type == R_SH_IMAGEBASE))
816
817 #else
818 && irelfn->r_type == R_SH_IMM32)
819 #endif
820 break;
821 if (irelfn >= irelend)
822 {
823 ((*_bfd_error_handler)
824 ("%s: 0x%lx: warning: could not find expected reloc",
825 bfd_archive_filename (abfd), (unsigned long) paddr));
826 continue;
827 }
828
829 /* Get the value of the symbol referred to by the reloc. */
830 if (! _bfd_coff_get_external_symbols (abfd))
831 goto error_return;
832 bfd_coff_swap_sym_in (abfd,
833 ((bfd_byte *) obj_coff_external_syms (abfd)
834 + (irelfn->r_symndx
835 * bfd_coff_symesz (abfd))),
836 &sym);
837 if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
838 {
839 ((*_bfd_error_handler)
840 ("%s: 0x%lx: warning: symbol in unexpected section",
841 bfd_archive_filename (abfd), (unsigned long) paddr));
842 continue;
843 }
844
845 if (sym.n_sclass != C_EXT)
846 {
847 symval = (sym.n_value
848 - sec->vma
849 + sec->output_section->vma
850 + sec->output_offset);
851 }
852 else
853 {
854 struct coff_link_hash_entry *h;
855
856 h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
857 BFD_ASSERT (h != NULL);
858 if (h->root.type != bfd_link_hash_defined
859 && h->root.type != bfd_link_hash_defweak)
860 {
861 /* This appears to be a reference to an undefined
862 symbol. Just ignore it--it will be caught by the
863 regular reloc processing. */
864 continue;
865 }
866
867 symval = (h->root.u.def.value
868 + h->root.u.def.section->output_section->vma
869 + h->root.u.def.section->output_offset);
870 }
871
872 symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
873
874 /* See if this function call can be shortened. */
875 foff = (symval
876 - (irel->r_vaddr
877 - sec->vma
878 + sec->output_section->vma
879 + sec->output_offset
880 + 4));
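/* A bsr has a signed 12-bit displacement counted in two-byte units,
   so the target must lie within -0x1000 .. 0xffe bytes of PC + 4. */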
881 if (foff < -0x1000 || foff >= 0x1000)
882 {
883 /* After all that work, we can't shorten this function call. */
884 continue;
885 }
886
887 /* Shorten the function call. */
888
889 /* For simplicity of coding, we are going to modify the section
890 contents, the section relocs, and the BFD symbol table. We
891 must tell the rest of the code not to free up this
892 information. It would be possible to instead create a table
893 of changes which have to be made, as is done in coff-mips.c;
894 that would be more work, but would require less memory when
895 the linker is run. */
896
897 if (coff_section_data (abfd, sec) == NULL)
898 {
899 bfd_size_type amt = sizeof (struct coff_section_tdata);
900 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
901 if (sec->used_by_bfd == NULL)
902 goto error_return;
903 }
904
905 coff_section_data (abfd, sec)->relocs = internal_relocs;
906 coff_section_data (abfd, sec)->keep_relocs = TRUE;
907 free_relocs = NULL;
908
909 coff_section_data (abfd, sec)->contents = contents;
910 coff_section_data (abfd, sec)->keep_contents = TRUE;
911 free_contents = NULL;
912
913 obj_coff_keep_syms (abfd) = TRUE;
914
915 /* Replace the jsr with a bsr. */
916
917 /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
918 replace the jsr with a bsr. */
919 irel->r_type = R_SH_PCDISP;
920 irel->r_symndx = irelfn->r_symndx;
921 if (sym.n_sclass != C_EXT)
922 {
923 /* If this needs to be changed because of future relaxing,
924 it will be handled here like other internal PCDISP
925 relocs. */
926 bfd_put_16 (abfd,
927 (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
928 contents + irel->r_vaddr - sec->vma);
929 }
930 else
931 {
932 /* We can't fully resolve this yet, because the external
933 symbol value may be changed by future relaxing. We let
934 the final link phase handle it. */
935 bfd_put_16 (abfd, (bfd_vma) 0xb000,
936 contents + irel->r_vaddr - sec->vma);
937 }
938
939 /* See if there is another R_SH_USES reloc referring to the same
940 register load. */
941 for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
942 if (irelscan->r_type == R_SH_USES
943 && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
944 break;
945 if (irelscan < irelend)
946 {
947 /* Some other function call depends upon this register load,
948 and we have not yet converted that function call.
949 Indeed, we may never be able to convert it. There is
950 nothing else we can do at this point. */
951 continue;
952 }
953
954 /* Look for an R_SH_COUNT reloc on the location where the
955 function address is stored. Do this before deleting any
956 bytes, to avoid confusion about the address. */
957 for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
958 if (irelcount->r_vaddr == paddr
959 && irelcount->r_type == R_SH_COUNT)
960 break;
961
962 /* Delete the register load. */
963 if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
964 goto error_return;
965
966 /* That will change things, so, just in case it permits some
967 other function call to come within range, we should relax
968 again. Note that this is not required, and it may be slow. */
969 *again = TRUE;
970
971 /* Now check whether we got a COUNT reloc. */
972 if (irelcount >= irelend)
973 {
974 ((*_bfd_error_handler)
975 ("%s: 0x%lx: warning: could not find expected COUNT reloc",
976 bfd_archive_filename (abfd), (unsigned long) paddr));
977 continue;
978 }
979
980 /* The number of uses is stored in the r_offset field. We've
981 just deleted one. */
982 if (irelcount->r_offset == 0)
983 {
984 ((*_bfd_error_handler) ("%s: 0x%lx: warning: bad count",
985 bfd_archive_filename (abfd),
986 (unsigned long) paddr));
987 continue;
988 }
989
990 --irelcount->r_offset;
991
992 /* If there are no more uses, we can delete the address. Reload
993 the address from irelfn, in case it was changed by the
994 previous call to sh_relax_delete_bytes. */
995 if (irelcount->r_offset == 0)
996 {
997 if (! sh_relax_delete_bytes (abfd, sec,
998 irelfn->r_vaddr - sec->vma, 4))
999 goto error_return;
1000 }
1001
1002 /* We've done all we can with that function call. */
1003 }
1004
1005 /* Look for load and store instructions that we can align on four
1006 byte boundaries. */
1007 if (have_code)
1008 {
1009 bfd_boolean swapped;
1010
1011 /* Get the section contents. */
1012 if (contents == NULL)
1013 {
1014 if (coff_section_data (abfd, sec) != NULL
1015 && coff_section_data (abfd, sec)->contents != NULL)
1016 contents = coff_section_data (abfd, sec)->contents;
1017 else
1018 {
1019 contents = (bfd_byte *) bfd_malloc (sec->_raw_size);
1020 if (contents == NULL)
1021 goto error_return;
1022 free_contents = contents;
1023
1024 if (! bfd_get_section_contents (abfd, sec, contents,
1025 (file_ptr) 0, sec->_raw_size))
1026 goto error_return;
1027 }
1028 }
1029
1030 if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1031 goto error_return;
1032
1033 if (swapped)
1034 {
1035 if (coff_section_data (abfd, sec) == NULL)
1036 {
1037 bfd_size_type amt = sizeof (struct coff_section_tdata);
1038 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
1039 if (sec->used_by_bfd == NULL)
1040 goto error_return;
1041 }
1042
1043 coff_section_data (abfd, sec)->relocs = internal_relocs;
1044 coff_section_data (abfd, sec)->keep_relocs = TRUE;
1045 free_relocs = NULL;
1046
1047 coff_section_data (abfd, sec)->contents = contents;
1048 coff_section_data (abfd, sec)->keep_contents = TRUE;
1049 free_contents = NULL;
1050
1051 obj_coff_keep_syms (abfd) = TRUE;
1052 }
1053 }
1054
1055 if (free_relocs != NULL)
1056 {
1057 free (free_relocs);
1058 free_relocs = NULL;
1059 }
1060
1061 if (free_contents != NULL)
1062 {
1063 if (! link_info->keep_memory)
1064 free (free_contents);
1065 else
1066 {
1067 /* Cache the section contents for coff_link_input_bfd. */
1068 if (coff_section_data (abfd, sec) == NULL)
1069 {
1070 bfd_size_type amt = sizeof (struct coff_section_tdata);
1071 sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
1072 if (sec->used_by_bfd == NULL)
1073 goto error_return;
1074 coff_section_data (abfd, sec)->relocs = NULL;
1075 }
1076 coff_section_data (abfd, sec)->contents = contents;
1077 }
1078 }
1079
1080 return TRUE;
1081
1082 error_return:
1083 if (free_relocs != NULL)
1084 free (free_relocs);
1085 if (free_contents != NULL)
1086 free (free_contents);
1087 return FALSE;
1088 }
1089
1090 /* Delete some bytes from a section while relaxing. */
1091
1092 static bfd_boolean
1093 sh_relax_delete_bytes (abfd, sec, addr, count)
1094 bfd *abfd;
1095 asection *sec;
1096 bfd_vma addr;
1097 int count;
1098 {
1099 bfd_byte *contents;
1100 struct internal_reloc *irel, *irelend;
1101 struct internal_reloc *irelalign;
1102 bfd_vma toaddr;
1103 bfd_byte *esym, *esymend;
1104 bfd_size_type symesz;
1105 struct coff_link_hash_entry **sym_hash;
1106 asection *o;
1107
1108 contents = coff_section_data (abfd, sec)->contents;
1109
1110 /* The deletion must stop at the next ALIGN reloc for an alignment
1111 power larger than the number of bytes we are deleting. */
1112
1113 irelalign = NULL;
1114 toaddr = sec->_cooked_size;
1115
1116 irel = coff_section_data (abfd, sec)->relocs;
1117 irelend = irel + sec->reloc_count;
1118 for (; irel < irelend; irel++)
1119 {
1120 if (irel->r_type == R_SH_ALIGN
1121 && irel->r_vaddr - sec->vma > addr
1122 && count < (1 << irel->r_offset))
1123 {
1124 irelalign = irel;
1125 toaddr = irel->r_vaddr - sec->vma;
1126 break;
1127 }
1128 }
1129
1130 /* Actually delete the bytes. */
1131 memmove (contents + addr, contents + addr + count,
1132 (size_t) (toaddr - addr - count));
1133 if (irelalign == NULL)
1134 sec->_cooked_size -= count;
1135 else
1136 {
1137 int i;
1138
1139 #define NOP_OPCODE (0x0009)
1140
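/* The deletion stopped at an ALIGN reloc, so the section does not
   shrink; instead the vacated bytes just before the alignment boundary
   are filled with NOPs so that everything at and beyond the boundary
   keeps its address. */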
1141 BFD_ASSERT ((count & 1) == 0);
1142 for (i = 0; i < count; i += 2)
1143 bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1144 }
1145
1146 /* Adjust all the relocs. */
1147 for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1148 {
1149 bfd_vma nraddr, stop;
1150 bfd_vma start = 0;
1151 int insn = 0;
1152 struct internal_syment sym;
1153 int off, adjust, oinsn;
1154 bfd_signed_vma voff = 0;
1155 bfd_boolean overflow;
1156
1157 /* Get the new reloc address. */
1158 nraddr = irel->r_vaddr - sec->vma;
1159 if ((irel->r_vaddr - sec->vma > addr
1160 && irel->r_vaddr - sec->vma < toaddr)
1161 || (irel->r_type == R_SH_ALIGN
1162 && irel->r_vaddr - sec->vma == toaddr))
1163 nraddr -= count;
1164
1165 /* See if this reloc was for the bytes we have deleted, in which
1166 case we no longer care about it. Don't delete relocs which
1167 represent addresses, though. */
1168 if (irel->r_vaddr - sec->vma >= addr
1169 && irel->r_vaddr - sec->vma < addr + count
1170 && irel->r_type != R_SH_ALIGN
1171 && irel->r_type != R_SH_CODE
1172 && irel->r_type != R_SH_DATA
1173 && irel->r_type != R_SH_LABEL)
1174 irel->r_type = R_SH_UNUSED;
1175
1176 /* If this is a PC relative reloc, see if the range it covers
1177 includes the bytes we have deleted. */
1178 switch (irel->r_type)
1179 {
1180 default:
1181 break;
1182
1183 case R_SH_PCDISP8BY2:
1184 case R_SH_PCDISP:
1185 case R_SH_PCRELIMM8BY2:
1186 case R_SH_PCRELIMM8BY4:
1187 start = irel->r_vaddr - sec->vma;
1188 insn = bfd_get_16 (abfd, contents + nraddr);
1189 break;
1190 }
1191
1192 switch (irel->r_type)
1193 {
1194 default:
1195 start = stop = addr;
1196 break;
1197
1198 case R_SH_IMM32:
1199 #ifdef COFF_WITH_PE
1200 case R_SH_IMM32CE:
1201 case R_SH_IMAGEBASE:
1202 #endif
1203 /* If this reloc is against a symbol defined in this
1204 section, and the symbol will not be adjusted below, we
1205 must check the addend to see whether it puts the value in
1206 the range being adjusted; if so, the contents must be changed. */
1207 bfd_coff_swap_sym_in (abfd,
1208 ((bfd_byte *) obj_coff_external_syms (abfd)
1209 + (irel->r_symndx
1210 * bfd_coff_symesz (abfd))),
1211 &sym);
1212 if (sym.n_sclass != C_EXT
1213 && sym.n_scnum == sec->target_index
1214 && ((bfd_vma) sym.n_value <= addr
1215 || (bfd_vma) sym.n_value >= toaddr))
1216 {
1217 bfd_vma val;
1218
1219 val = bfd_get_32 (abfd, contents + nraddr);
1220 val += sym.n_value;
1221 if (val > addr && val < toaddr)
1222 bfd_put_32 (abfd, val - count, contents + nraddr);
1223 }
1224 start = stop = addr;
1225 break;
1226
1227 case R_SH_PCDISP8BY2:
1228 off = insn & 0xff;
1229 if (off & 0x80)
1230 off -= 0x100;
1231 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1232 break;
1233
1234 case R_SH_PCDISP:
1235 bfd_coff_swap_sym_in (abfd,
1236 ((bfd_byte *) obj_coff_external_syms (abfd)
1237 + (irel->r_symndx
1238 * bfd_coff_symesz (abfd))),
1239 &sym);
1240 if (sym.n_sclass == C_EXT)
1241 start = stop = addr;
1242 else
1243 {
1244 off = insn & 0xfff;
1245 if (off & 0x800)
1246 off -= 0x1000;
1247 stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1248 }
1249 break;
1250
1251 case R_SH_PCRELIMM8BY2:
1252 off = insn & 0xff;
1253 stop = start + 4 + off * 2;
1254 break;
1255
1256 case R_SH_PCRELIMM8BY4:
1257 off = insn & 0xff;
1258 stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1259 break;
1260
1261 case R_SH_SWITCH8:
1262 case R_SH_SWITCH16:
1263 case R_SH_SWITCH32:
1264 /* These reloc types represent
1265 .word L2-L1
1266 The r_offset field holds the difference between the reloc
1267 address and L1. That is the start of the reloc, and
1268 adding in the contents gives us the top. We must adjust
1269 both the r_offset field and the section contents. */
1270
1271 start = irel->r_vaddr - sec->vma;
1272 stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
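
/* At this point start is the address of the difference word itself
   and stop is L1. Below, start is then set to L1 and stop is
   recomputed as L2, so the generic adjustment code further down can
   also fix up the stored difference itself. */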
1273
1274 if (start > addr
1275 && start < toaddr
1276 && (stop <= addr || stop >= toaddr))
1277 irel->r_offset += count;
1278 else if (stop > addr
1279 && stop < toaddr
1280 && (start <= addr || start >= toaddr))
1281 irel->r_offset -= count;
1282
1283 start = stop;
1284
1285 if (irel->r_type == R_SH_SWITCH16)
1286 voff = bfd_get_signed_16 (abfd, contents + nraddr);
1287 else if (irel->r_type == R_SH_SWITCH8)
1288 voff = bfd_get_8 (abfd, contents + nraddr);
1289 else
1290 voff = bfd_get_signed_32 (abfd, contents + nraddr);
1291 stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1292
1293 break;
1294
1295 case R_SH_USES:
1296 start = irel->r_vaddr - sec->vma;
1297 stop = (bfd_vma) ((bfd_signed_vma) start
1298 + (long) irel->r_offset
1299 + 4);
1300 break;
1301 }
1302
1303 if (start > addr
1304 && start < toaddr
1305 && (stop <= addr || stop >= toaddr))
1306 adjust = count;
1307 else if (stop > addr
1308 && stop < toaddr
1309 && (start <= addr || start >= toaddr))
1310 adjust = - count;
1311 else
1312 adjust = 0;
1313
1314 if (adjust != 0)
1315 {
1316 oinsn = insn;
1317 overflow = FALSE;
1318 switch (irel->r_type)
1319 {
1320 default:
1321 abort ();
1322 break;
1323
1324 case R_SH_PCDISP8BY2:
1325 case R_SH_PCRELIMM8BY2:
1326 insn += adjust / 2;
1327 if ((oinsn & 0xff00) != (insn & 0xff00))
1328 overflow = TRUE;
1329 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1330 break;
1331
1332 case R_SH_PCDISP:
1333 insn += adjust / 2;
1334 if ((oinsn & 0xf000) != (insn & 0xf000))
1335 overflow = TRUE;
1336 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1337 break;
1338
1339 case R_SH_PCRELIMM8BY4:
1340 BFD_ASSERT (adjust == count || count >= 4);
1341 if (count >= 4)
1342 insn += adjust / 4;
1343 else
1344 {
1345 if ((irel->r_vaddr & 3) == 0)
1346 ++insn;
1347 }
1348 if ((oinsn & 0xff00) != (insn & 0xff00))
1349 overflow = TRUE;
1350 bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1351 break;
1352
1353 case R_SH_SWITCH8:
1354 voff += adjust;
1355 if (voff < 0 || voff >= 0xff)
1356 overflow = TRUE;
1357 bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1358 break;
1359
1360 case R_SH_SWITCH16:
1361 voff += adjust;
1362 if (voff < - 0x8000 || voff >= 0x8000)
1363 overflow = TRUE;
1364 bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1365 break;
1366
1367 case R_SH_SWITCH32:
1368 voff += adjust;
1369 bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1370 break;
1371
1372 case R_SH_USES:
1373 irel->r_offset += adjust;
1374 break;
1375 }
1376
1377 if (overflow)
1378 {
1379 ((*_bfd_error_handler)
1380 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
1381 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
1382 bfd_set_error (bfd_error_bad_value);
1383 return FALSE;
1384 }
1385 }
1386
1387 irel->r_vaddr = nraddr + sec->vma;
1388 }
1389
1390 /* Look through all the other sections. If they contain any IMM32
1391 relocs against internal symbols which we are not going to adjust
1392 below, we may need to adjust the addends. */
1393 for (o = abfd->sections; o != NULL; o = o->next)
1394 {
1395 struct internal_reloc *internal_relocs;
1396 struct internal_reloc *irelscan, *irelscanend;
1397 bfd_byte *ocontents;
1398
1399 if (o == sec
1400 || (o->flags & SEC_RELOC) == 0
1401 || o->reloc_count == 0)
1402 continue;
1403
1404 /* We always cache the relocs. Perhaps, if info->keep_memory is
1405 FALSE, we should free them, if we are permitted to, when we
1406 leave sh_coff_relax_section. */
1407 internal_relocs = (_bfd_coff_read_internal_relocs
1408 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1409 (struct internal_reloc *) NULL));
1410 if (internal_relocs == NULL)
1411 return FALSE;
1412
1413 ocontents = NULL;
1414 irelscanend = internal_relocs + o->reloc_count;
1415 for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1416 {
1417 struct internal_syment sym;
1418
1419 #ifdef COFF_WITH_PE
1420 if (irelscan->r_type != R_SH_IMM32
1421 && irelscan->r_type != R_SH_IMAGEBASE
1422 && irelscan->r_type != R_SH_IMM32CE)
1423 #else
1424 if (irelscan->r_type != R_SH_IMM32)
1425 #endif
1426 continue;
1427
1428 bfd_coff_swap_sym_in (abfd,
1429 ((bfd_byte *) obj_coff_external_syms (abfd)
1430 + (irelscan->r_symndx
1431 * bfd_coff_symesz (abfd))),
1432 &sym);
1433 if (sym.n_sclass != C_EXT
1434 && sym.n_scnum == sec->target_index
1435 && ((bfd_vma) sym.n_value <= addr
1436 || (bfd_vma) sym.n_value >= toaddr))
1437 {
1438 bfd_vma val;
1439
1440 if (ocontents == NULL)
1441 {
1442 if (coff_section_data (abfd, o)->contents != NULL)
1443 ocontents = coff_section_data (abfd, o)->contents;
1444 else
1445 {
1446 /* We always cache the section contents.
1447 Perhaps, if info->keep_memory is FALSE, we
1448 should free them, if we are permitted to,
1449 when we leave sh_coff_relax_section. */
1450 ocontents = (bfd_byte *) bfd_malloc (o->_raw_size);
1451 if (ocontents == NULL)
1452 return FALSE;
1453 if (! bfd_get_section_contents (abfd, o, ocontents,
1454 (file_ptr) 0,
1455 o->_raw_size))
1456 return FALSE;
1457 coff_section_data (abfd, o)->contents = ocontents;
1458 }
1459 }
1460
1461 val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1462 val += sym.n_value;
1463 if (val > addr && val < toaddr)
1464 bfd_put_32 (abfd, val - count,
1465 ocontents + irelscan->r_vaddr - o->vma);
1466
1467 coff_section_data (abfd, o)->keep_contents = TRUE;
1468 }
1469 }
1470 }
1471
1472 /* Adjusting the internal symbols will not work if something has
1473 already retrieved the generic symbols. It would be possible to
1474 make this work by adjusting the generic symbols at the same time.
1475 However, this case should not arise in normal usage. */
1476 if (obj_symbols (abfd) != NULL
1477 || obj_raw_syments (abfd) != NULL)
1478 {
1479 ((*_bfd_error_handler)
1480 ("%s: fatal: generic symbols retrieved before relaxing",
1481 bfd_archive_filename (abfd)));
1482 bfd_set_error (bfd_error_invalid_operation);
1483 return FALSE;
1484 }
1485
1486 /* Adjust all the symbols. */
1487 sym_hash = obj_coff_sym_hashes (abfd);
1488 symesz = bfd_coff_symesz (abfd);
1489 esym = (bfd_byte *) obj_coff_external_syms (abfd);
1490 esymend = esym + obj_raw_syment_count (abfd) * symesz;
1491 while (esym < esymend)
1492 {
1493 struct internal_syment isym;
1494
1495 bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1496
1497 if (isym.n_scnum == sec->target_index
1498 && (bfd_vma) isym.n_value > addr
1499 && (bfd_vma) isym.n_value < toaddr)
1500 {
1501 isym.n_value -= count;
1502
1503 bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1504
1505 if (*sym_hash != NULL)
1506 {
1507 BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1508 || (*sym_hash)->root.type == bfd_link_hash_defweak);
1509 BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1510 && (*sym_hash)->root.u.def.value < toaddr);
1511 (*sym_hash)->root.u.def.value -= count;
1512 }
1513 }
1514
1515 esym += (isym.n_numaux + 1) * symesz;
1516 sym_hash += isym.n_numaux + 1;
1517 }
1518
1519 /* See if we can move the ALIGN reloc forward. We have adjusted
1520 r_vaddr for it already. */
1521 if (irelalign != NULL)
1522 {
1523 bfd_vma alignto, alignaddr;
1524
1525 alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1526 alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1527 1 << irelalign->r_offset);
1528 if (alignto != alignaddr)
1529 {
1530 /* Tail recursion. */
1531 return sh_relax_delete_bytes (abfd, sec, alignaddr,
1532 (int) (alignto - alignaddr));
1533 }
1534 }
1535
1536 return TRUE;
1537 }
1538 \f
1539 /* This is yet another version of the SH opcode table, used to rapidly
1540 get information about a particular instruction. */
1541
1542 /* The opcode map is represented by an array of these structures. The
1543 array is indexed by the high order four bits in the instruction. */
1544
1545 struct sh_major_opcode
1546 {
1547 /* A pointer to the instruction list. This is an array which
1548 contains all the instructions with this major opcode. */
1549 const struct sh_minor_opcode *minor_opcodes;
1550 /* The number of elements in minor_opcodes. */
1551 unsigned short count;
1552 };
1553
1554 /* This structure holds information for a set of SH opcodes. The
1555 instruction code is anded with the mask value, and the resulting
1556 value is used to search the sorted opcode list. */
1557
1558 struct sh_minor_opcode
1559 {
1560 /* The sorted opcode list. */
1561 const struct sh_opcode *opcodes;
1562 /* The number of elements in opcodes. */
1563 unsigned short count;
1564 /* The mask value to use when searching the opcode list. */
1565 unsigned short mask;
1566 };
1567
1568 /* This structure holds information for an SH instruction. An array
1569 of these structures is sorted in order by opcode. */
1570
1571 struct sh_opcode
1572 {
1573 /* The code for this instruction, after it has been anded with the
1574 mask value in the sh_minor_opcode structure. */
1575 unsigned short opcode;
1576 /* Flags for this instruction. */
1577 unsigned long flags;
1578 };
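
/* For example (illustrative), instruction 0x2005 (mov.w rm,@-rn) is
   looked up through the major table entry for its top four bits; the
   minor entry masks it with 0xf00f, leaving 0x2005, which is found in
   the sorted opcode list with flags STORE | SETS1 | USES1 | USES2. */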
1579
1580 /* Flags which appear in the sh_opcode structure. */
1581
1582 /* This instruction loads a value from memory. */
1583 #define LOAD (0x1)
1584
1585 /* This instruction stores a value to memory. */
1586 #define STORE (0x2)
1587
1588 /* This instruction is a branch. */
1589 #define BRANCH (0x4)
1590
1591 /* This instruction has a delay slot. */
1592 #define DELAY (0x8)
1593
1594 /* This instruction uses the value in the register in the field at
1595 mask 0x0f00 of the instruction. */
1596 #define USES1 (0x10)
1597 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1598
1599 /* This instruction uses the value in the register in the field at
1600 mask 0x00f0 of the instruction. */
1601 #define USES2 (0x20)
1602 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1603
1604 /* This instruction uses the value in register 0. */
1605 #define USESR0 (0x40)
1606
1607 /* This instruction sets the value in the register in the field at
1608 mask 0x0f00 of the instruction. */
1609 #define SETS1 (0x80)
1610 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1611
1612 /* This instruction sets the value in the register in the field at
1613 mask 0x00f0 of the instruction. */
1614 #define SETS2 (0x100)
1615 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1616
1617 /* This instruction sets register 0. */
1618 #define SETSR0 (0x200)
1619
1620 /* This instruction sets a special register. */
1621 #define SETSSP (0x400)
1622
1623 /* This instruction uses a special register. */
1624 #define USESSP (0x800)
1625
1626 /* This instruction uses the floating point register in the field at
1627 mask 0x0f00 of the instruction. */
1628 #define USESF1 (0x1000)
1629 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1630
1631 /* This instruction uses the floating point register in the field at
1632 mask 0x00f0 of the instruction. */
1633 #define USESF2 (0x2000)
1634 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1635
1636 /* This instruction uses floating point register 0. */
1637 #define USESF0 (0x4000)
1638
1639 /* This instruction sets the floating point register in the field at
1640 mask 0x0f00 of the instruction. */
1641 #define SETSF1 (0x8000)
1642 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1643
1644 #define USESAS (0x10000)
1645 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1646 #define USESR8 (0x20000)
1647 #define SETSAS (0x40000)
1648 #define SETSAS_REG(x) USESAS_REG (x)
1649
1650 #ifndef COFF_IMAGE_WITH_PE
1651 static bfd_boolean sh_insn_uses_reg
1652 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1653 static bfd_boolean sh_insn_sets_reg
1654 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1655 static bfd_boolean sh_insn_uses_or_sets_reg
1656 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1657 static bfd_boolean sh_insn_uses_freg
1658 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1659 static bfd_boolean sh_insn_sets_freg
1660 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1661 static bfd_boolean sh_insn_uses_or_sets_freg
1662 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1663 static bfd_boolean sh_insns_conflict
1664 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1665 const struct sh_opcode *));
1666 static bfd_boolean sh_load_use
1667 PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1668 const struct sh_opcode *));
1669 #endif
1670 /* The opcode maps. */
1671
1672 #define MAP(a) a, sizeof a / sizeof a[0]
1673
1674 static const struct sh_opcode sh_opcode00[] =
1675 {
1676 { 0x0008, SETSSP }, /* clrt */
1677 { 0x0009, 0 }, /* nop */
1678 { 0x000b, BRANCH | DELAY | USESSP }, /* rts */
1679 { 0x0018, SETSSP }, /* sett */
1680 { 0x0019, SETSSP }, /* div0u */
1681 { 0x001b, 0 }, /* sleep */
1682 { 0x0028, SETSSP }, /* clrmac */
1683 { 0x002b, BRANCH | DELAY | SETSSP }, /* rte */
1684 { 0x0038, USESSP | SETSSP }, /* ldtlb */
1685 { 0x0048, SETSSP }, /* clrs */
1686 { 0x0058, SETSSP } /* sets */
1687 };
1688
1689 static const struct sh_opcode sh_opcode01[] =
1690 {
1691 { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
1692 { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
1693 { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
1694 { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
1695 { 0x0029, SETS1 | USESSP }, /* movt rn */
1696 { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
1697 { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
1698 { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
1699 { 0x0083, LOAD | USES1 }, /* pref @rn */
1700 { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
1701 { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
1702 { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
1703 { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
1704 { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
1705 };
1706
1707 /* These sixteen instructions can be handled with one table entry below. */
1708 #if 0
1709 { 0x0002, SETS1 | USESSP }, /* stc sr,rn */
1710 { 0x0012, SETS1 | USESSP }, /* stc gbr,rn */
1711 { 0x0022, SETS1 | USESSP }, /* stc vbr,rn */
1712 { 0x0032, SETS1 | USESSP }, /* stc ssr,rn */
1713 { 0x0042, SETS1 | USESSP }, /* stc spc,rn */
1714 { 0x0052, SETS1 | USESSP }, /* stc mod,rn */
1715 { 0x0062, SETS1 | USESSP }, /* stc rs,rn */
1716 { 0x0072, SETS1 | USESSP }, /* stc re,rn */
1717 { 0x0082, SETS1 | USESSP }, /* stc r0_bank,rn */
1718 { 0x0092, SETS1 | USESSP }, /* stc r1_bank,rn */
1719 { 0x00a2, SETS1 | USESSP }, /* stc r2_bank,rn */
1720 { 0x00b2, SETS1 | USESSP }, /* stc r3_bank,rn */
1721 { 0x00c2, SETS1 | USESSP }, /* stc r4_bank,rn */
1722 { 0x00d2, SETS1 | USESSP }, /* stc r5_bank,rn */
1723 { 0x00e2, SETS1 | USESSP }, /* stc r6_bank,rn */
1724 { 0x00f2, SETS1 | USESSP } /* stc r7_bank,rn */
1725 #endif
1726
1727 static const struct sh_opcode sh_opcode02[] =
1728 {
1729 { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
1730 { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
1731 { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
1732 { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
1733 { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
1734 { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
1735 { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
1736 { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
1737 { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1738 };
1739
1740 static const struct sh_minor_opcode sh_opcode0[] =
1741 {
1742 { MAP (sh_opcode00), 0xffff },
1743 { MAP (sh_opcode01), 0xf0ff },
1744 { MAP (sh_opcode02), 0xf00f }
1745 };
1746
1747 static const struct sh_opcode sh_opcode10[] =
1748 {
1749 { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
1750 };
1751
1752 static const struct sh_minor_opcode sh_opcode1[] =
1753 {
1754 { MAP (sh_opcode10), 0xf000 }
1755 };
1756
1757 static const struct sh_opcode sh_opcode20[] =
1758 {
1759 { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
1760 { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
1761 { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
1762 { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
1763 { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
1764 { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
1765 { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
1766 { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
1767 { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
1768 { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
1769 { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
1770 { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
1771 { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
1772 { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
1773 { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
1774 };
1775
1776 static const struct sh_minor_opcode sh_opcode2[] =
1777 {
1778 { MAP (sh_opcode20), 0xf00f }
1779 };
1780
1781 static const struct sh_opcode sh_opcode30[] =
1782 {
1783 { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
1784 { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
1785 { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
1786 { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
1787 { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
1788 { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
1789 { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
1790 { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
1791 { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1792 { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
1793 { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
1794 { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
1795 { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1796 { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
1797 };
1798
1799 static const struct sh_minor_opcode sh_opcode3[] =
1800 {
1801 { MAP (sh_opcode30), 0xf00f }
1802 };
1803
1804 static const struct sh_opcode sh_opcode40[] =
1805 {
1806 { 0x4000, SETS1 | SETSSP | USES1 }, /* shll rn */
1807 { 0x4001, SETS1 | SETSSP | USES1 }, /* shlr rn */
1808 { 0x4002, STORE | SETS1 | USES1 | USESSP }, /* sts.l mach,@-rn */
1809 { 0x4004, SETS1 | SETSSP | USES1 }, /* rotl rn */
1810 { 0x4005, SETS1 | SETSSP | USES1 }, /* rotr rn */
1811 { 0x4006, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,mach */
1812 { 0x4008, SETS1 | USES1 }, /* shll2 rn */
1813 { 0x4009, SETS1 | USES1 }, /* shlr2 rn */
1814 { 0x400a, SETSSP | USES1 }, /* lds rm,mach */
1815 { 0x400b, BRANCH | DELAY | USES1 }, /* jsr @rn */
1816 { 0x4010, SETS1 | SETSSP | USES1 }, /* dt rn */
1817 { 0x4011, SETSSP | USES1 }, /* cmp/pz rn */
1818 { 0x4012, STORE | SETS1 | USES1 | USESSP }, /* sts.l macl,@-rn */
1819 { 0x4014, SETSSP | USES1 }, /* setrc rm */
1820 { 0x4015, SETSSP | USES1 }, /* cmp/pl rn */
1821 { 0x4016, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,macl */
1822 { 0x4018, SETS1 | USES1 }, /* shll8 rn */
1823 { 0x4019, SETS1 | USES1 }, /* shlr8 rn */
1824 { 0x401a, SETSSP | USES1 }, /* lds rm,macl */
1825 { 0x401b, LOAD | SETSSP | USES1 }, /* tas.b @rn */
1826 { 0x4020, SETS1 | SETSSP | USES1 }, /* shal rn */
1827 { 0x4021, SETS1 | SETSSP | USES1 }, /* shar rn */
1828 { 0x4022, STORE | SETS1 | USES1 | USESSP }, /* sts.l pr,@-rn */
1829 { 0x4024, SETS1 | SETSSP | USES1 | USESSP }, /* rotcl rn */
1830 { 0x4025, SETS1 | SETSSP | USES1 | USESSP }, /* rotcr rn */
1831 { 0x4026, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,pr */
1832 { 0x4028, SETS1 | USES1 }, /* shll16 rn */
1833 { 0x4029, SETS1 | USES1 }, /* shlr16 rn */
1834 { 0x402a, SETSSP | USES1 }, /* lds rm,pr */
1835 { 0x402b, BRANCH | DELAY | USES1 }, /* jmp @rn */
1836 { 0x4052, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpul,@-rn */
1837 { 0x4056, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpul */
1838 { 0x405a, SETSSP | USES1 }, /* lds rm,fpul */
1839 { 0x4062, STORE | SETS1 | USES1 | USESSP }, /* sts.l fpscr / dsr,@-rn */
1840 { 0x4066, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,fpscr / dsr */
1841 { 0x406a, SETSSP | USES1 }, /* lds rm,fpscr / lds rm,dsr */
1842 { 0x4072, STORE | SETS1 | USES1 | USESSP }, /* sts.l a0,@-rn */
1843 { 0x4076, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,a0 */
1844 { 0x407a, SETSSP | USES1 }, /* lds rm,a0 */
1845 { 0x4082, STORE | SETS1 | USES1 | USESSP }, /* sts.l x0,@-rn */
1846 { 0x4086, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x0 */
1847 { 0x408a, SETSSP | USES1 }, /* lds rm,x0 */
1848 { 0x4092, STORE | SETS1 | USES1 | USESSP }, /* sts.l x1,@-rn */
1849 { 0x4096, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,x1 */
1850 { 0x409a, SETSSP | USES1 }, /* lds rm,x1 */
1851 { 0x40a2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y0,@-rn */
1852 { 0x40a6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y0 */
1853 { 0x40aa, SETSSP | USES1 }, /* lds rm,y0 */
1854 { 0x40b2, STORE | SETS1 | USES1 | USESSP }, /* sts.l y1,@-rn */
1855 { 0x40b6, LOAD | SETS1 | SETSSP | USES1 }, /* lds.l @rm+,y1 */
1856 { 0x40ba, SETSSP | USES1 } /* lds rm,y1 */
1857 #if 0 /* These groups of sixteen insns can each be
1858 handled with one table entry below. */
1859 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l sr,@-rn */
1860 { 0x4013, STORE | SETS1 | USES1 | USESSP }, /* stc.l gbr,@-rn */
1861 { 0x4023, STORE | SETS1 | USES1 | USESSP }, /* stc.l vbr,@-rn */
1862 { 0x4033, STORE | SETS1 | USES1 | USESSP }, /* stc.l ssr,@-rn */
1863 { 0x4043, STORE | SETS1 | USES1 | USESSP }, /* stc.l spc,@-rn */
1864 { 0x4053, STORE | SETS1 | USES1 | USESSP }, /* stc.l mod,@-rn */
1865 { 0x4063, STORE | SETS1 | USES1 | USESSP }, /* stc.l rs,@-rn */
1866 { 0x4073, STORE | SETS1 | USES1 | USESSP }, /* stc.l re,@-rn */
1867 { 0x4083, STORE | SETS1 | USES1 | USESSP }, /* stc.l r0_bank,@-rn */
1868 ..
1869 { 0x40f3, STORE | SETS1 | USES1 | USESSP }, /* stc.l r7_bank,@-rn */
1870
1871 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,sr */
1872 { 0x4017, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,gbr */
1873 { 0x4027, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,vbr */
1874 { 0x4037, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,ssr */
1875 { 0x4047, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,spc */
1876 { 0x4057, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,mod */
1877 { 0x4067, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,rs */
1878 { 0x4077, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,re */
1879 { 0x4087, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r0_bank */
1880 ..
1881 { 0x40f7, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,r7_bank */
1882
1883 { 0x400e, SETSSP | USES1 }, /* ldc rm,sr */
1884 { 0x401e, SETSSP | USES1 }, /* ldc rm,gbr */
1885 { 0x402e, SETSSP | USES1 }, /* ldc rm,vbr */
1886 { 0x403e, SETSSP | USES1 }, /* ldc rm,ssr */
1887 { 0x404e, SETSSP | USES1 }, /* ldc rm,spc */
1888 { 0x405e, SETSSP | USES1 }, /* ldc rm,mod */
1889 { 0x406e, SETSSP | USES1 }, /* ldc rm,rs */
1890 { 0x407e, SETSSP | USES1 } /* ldc rm,re */
1891 { 0x408e, SETSSP | USES1 } /* ldc rm,r0_bank */
1892 ..
1893 { 0x40fe, SETSSP | USES1 } /* ldc rm,r7_bank */
1894 #endif
1895 };
1896
1897 static const struct sh_opcode sh_opcode41[] =
1898 {
1899 { 0x4003, STORE | SETS1 | USES1 | USESSP }, /* stc.l <special_reg>,@-rn */
1900 { 0x4007, LOAD | SETS1 | SETSSP | USES1 }, /* ldc.l @rm+,<special_reg> */
1901 { 0x400c, SETS1 | USES1 | USES2 }, /* shad rm,rn */
1902 { 0x400d, SETS1 | USES1 | USES2 }, /* shld rm,rn */
1903 { 0x400e, SETSSP | USES1 }, /* ldc rm,<special_reg> */
1904 { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1905 };
1906
1907 static const struct sh_minor_opcode sh_opcode4[] =
1908 {
1909 { MAP (sh_opcode40), 0xf0ff },
1910 { MAP (sh_opcode41), 0xf00f }
1911 };
1912
1913 static const struct sh_opcode sh_opcode50[] =
1914 {
1915 { 0x5000, LOAD | SETS1 | USES2 } /* mov.l @(disp,rm),rn */
1916 };
1917
1918 static const struct sh_minor_opcode sh_opcode5[] =
1919 {
1920 { MAP (sh_opcode50), 0xf000 }
1921 };
1922
1923 static const struct sh_opcode sh_opcode60[] =
1924 {
1925 { 0x6000, LOAD | SETS1 | USES2 }, /* mov.b @rm,rn */
1926 { 0x6001, LOAD | SETS1 | USES2 }, /* mov.w @rm,rn */
1927 { 0x6002, LOAD | SETS1 | USES2 }, /* mov.l @rm,rn */
1928 { 0x6003, SETS1 | USES2 }, /* mov rm,rn */
1929 { 0x6004, LOAD | SETS1 | SETS2 | USES2 }, /* mov.b @rm+,rn */
1930 { 0x6005, LOAD | SETS1 | SETS2 | USES2 }, /* mov.w @rm+,rn */
1931 { 0x6006, LOAD | SETS1 | SETS2 | USES2 }, /* mov.l @rm+,rn */
1932 { 0x6007, SETS1 | USES2 }, /* not rm,rn */
1933 { 0x6008, SETS1 | USES2 }, /* swap.b rm,rn */
1934 { 0x6009, SETS1 | USES2 }, /* swap.w rm,rn */
1935 { 0x600a, SETS1 | SETSSP | USES2 | USESSP }, /* negc rm,rn */
1936 { 0x600b, SETS1 | USES2 }, /* neg rm,rn */
1937 { 0x600c, SETS1 | USES2 }, /* extu.b rm,rn */
1938 { 0x600d, SETS1 | USES2 }, /* extu.w rm,rn */
1939 { 0x600e, SETS1 | USES2 }, /* exts.b rm,rn */
1940 { 0x600f, SETS1 | USES2 } /* exts.w rm,rn */
1941 };
1942
1943 static const struct sh_minor_opcode sh_opcode6[] =
1944 {
1945 { MAP (sh_opcode60), 0xf00f }
1946 };
1947
1948 static const struct sh_opcode sh_opcode70[] =
1949 {
1950 { 0x7000, SETS1 | USES1 } /* add #imm,rn */
1951 };
1952
1953 static const struct sh_minor_opcode sh_opcode7[] =
1954 {
1955 { MAP (sh_opcode70), 0xf000 }
1956 };
1957
1958 static const struct sh_opcode sh_opcode80[] =
1959 {
1960 { 0x8000, STORE | USES2 | USESR0 }, /* mov.b r0,@(disp,rn) */
1961 { 0x8100, STORE | USES2 | USESR0 }, /* mov.w r0,@(disp,rn) */
1962 { 0x8200, SETSSP }, /* setrc #imm */
1963 { 0x8400, LOAD | SETSR0 | USES2 }, /* mov.b @(disp,rm),r0 */
1964 { 0x8500, LOAD | SETSR0 | USES2 }, /* mov.w @(disp,rm),r0 */
1965 { 0x8800, SETSSP | USESR0 }, /* cmp/eq #imm,r0 */
1966 { 0x8900, BRANCH | USESSP }, /* bt label */
1967 { 0x8b00, BRANCH | USESSP }, /* bf label */
1968 { 0x8c00, SETSSP }, /* ldrs @(disp,pc) */
1969 { 0x8d00, BRANCH | DELAY | USESSP }, /* bt/s label */
1970 { 0x8e00, SETSSP }, /* ldre @(disp,pc) */
1971 { 0x8f00, BRANCH | DELAY | USESSP } /* bf/s label */
1972 };
1973
1974 static const struct sh_minor_opcode sh_opcode8[] =
1975 {
1976 { MAP (sh_opcode80), 0xff00 }
1977 };
1978
1979 static const struct sh_opcode sh_opcode90[] =
1980 {
1981 { 0x9000, LOAD | SETS1 } /* mov.w @(disp,pc),rn */
1982 };
1983
1984 static const struct sh_minor_opcode sh_opcode9[] =
1985 {
1986 { MAP (sh_opcode90), 0xf000 }
1987 };
1988
1989 static const struct sh_opcode sh_opcodea0[] =
1990 {
1991 { 0xa000, BRANCH | DELAY } /* bra label */
1992 };
1993
1994 static const struct sh_minor_opcode sh_opcodea[] =
1995 {
1996 { MAP (sh_opcodea0), 0xf000 }
1997 };
1998
1999 static const struct sh_opcode sh_opcodeb0[] =
2000 {
2001 { 0xb000, BRANCH | DELAY } /* bsr label */
2002 };
2003
2004 static const struct sh_minor_opcode sh_opcodeb[] =
2005 {
2006 { MAP (sh_opcodeb0), 0xf000 }
2007 };
2008
2009 static const struct sh_opcode sh_opcodec0[] =
2010 {
2011 { 0xc000, STORE | USESR0 | USESSP }, /* mov.b r0,@(disp,gbr) */
2012 { 0xc100, STORE | USESR0 | USESSP }, /* mov.w r0,@(disp,gbr) */
2013 { 0xc200, STORE | USESR0 | USESSP }, /* mov.l r0,@(disp,gbr) */
2014 { 0xc300, BRANCH | USESSP }, /* trapa #imm */
2015 { 0xc400, LOAD | SETSR0 | USESSP }, /* mov.b @(disp,gbr),r0 */
2016 { 0xc500, LOAD | SETSR0 | USESSP }, /* mov.w @(disp,gbr),r0 */
2017 { 0xc600, LOAD | SETSR0 | USESSP }, /* mov.l @(disp,gbr),r0 */
2018 { 0xc700, SETSR0 }, /* mova @(disp,pc),r0 */
2019 { 0xc800, SETSSP | USESR0 }, /* tst #imm,r0 */
2020 { 0xc900, SETSR0 | USESR0 }, /* and #imm,r0 */
2021 { 0xca00, SETSR0 | USESR0 }, /* xor #imm,r0 */
2022 { 0xcb00, SETSR0 | USESR0 }, /* or #imm,r0 */
2023 { 0xcc00, LOAD | SETSSP | USESR0 | USESSP }, /* tst.b #imm,@(r0,gbr) */
2024 { 0xcd00, LOAD | STORE | USESR0 | USESSP }, /* and.b #imm,@(r0,gbr) */
2025 { 0xce00, LOAD | STORE | USESR0 | USESSP }, /* xor.b #imm,@(r0,gbr) */
2026 { 0xcf00, LOAD | STORE | USESR0 | USESSP } /* or.b #imm,@(r0,gbr) */
2027 };
2028
2029 static const struct sh_minor_opcode sh_opcodec[] =
2030 {
2031 { MAP (sh_opcodec0), 0xff00 }
2032 };
2033
2034 static const struct sh_opcode sh_opcoded0[] =
2035 {
2036 { 0xd000, LOAD | SETS1 } /* mov.l @(disp,pc),rn */
2037 };
2038
2039 static const struct sh_minor_opcode sh_opcoded[] =
2040 {
2041 { MAP (sh_opcoded0), 0xf000 }
2042 };
2043
2044 static const struct sh_opcode sh_opcodee0[] =
2045 {
2046 { 0xe000, SETS1 } /* mov #imm,rn */
2047 };
2048
2049 static const struct sh_minor_opcode sh_opcodee[] =
2050 {
2051 { MAP (sh_opcodee0), 0xf000 }
2052 };
2053
2054 static const struct sh_opcode sh_opcodef0[] =
2055 {
2056 { 0xf000, SETSF1 | USESF1 | USESF2 }, /* fadd fm,fn */
2057 { 0xf001, SETSF1 | USESF1 | USESF2 }, /* fsub fm,fn */
2058 { 0xf002, SETSF1 | USESF1 | USESF2 }, /* fmul fm,fn */
2059 { 0xf003, SETSF1 | USESF1 | USESF2 }, /* fdiv fm,fn */
2060 { 0xf004, SETSSP | USESF1 | USESF2 }, /* fcmp/eq fm,fn */
2061 { 0xf005, SETSSP | USESF1 | USESF2 }, /* fcmp/gt fm,fn */
2062 { 0xf006, LOAD | SETSF1 | USES2 | USESR0 }, /* fmov.s @(r0,rm),fn */
2063 { 0xf007, STORE | USES1 | USESF2 | USESR0 }, /* fmov.s fm,@(r0,rn) */
2064 { 0xf008, LOAD | SETSF1 | USES2 }, /* fmov.s @rm,fn */
2065 { 0xf009, LOAD | SETS2 | SETSF1 | USES2 }, /* fmov.s @rm+,fn */
2066 { 0xf00a, STORE | USES1 | USESF2 }, /* fmov.s fm,@rn */
2067 { 0xf00b, STORE | SETS1 | USES1 | USESF2 }, /* fmov.s fm,@-rn */
2068 { 0xf00c, SETSF1 | USESF2 }, /* fmov fm,fn */
2069 { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 } /* fmac f0,fm,fn */
2070 };
2071
2072 static const struct sh_opcode sh_opcodef1[] =
2073 {
2074 { 0xf00d, SETSF1 | USESSP }, /* fsts fpul,fn */
2075 { 0xf01d, SETSSP | USESF1 }, /* flds fn,fpul */
2076 { 0xf02d, SETSF1 | USESSP }, /* float fpul,fn */
2077 { 0xf03d, SETSSP | USESF1 }, /* ftrc fn,fpul */
2078 { 0xf04d, SETSF1 | USESF1 }, /* fneg fn */
2079 { 0xf05d, SETSF1 | USESF1 }, /* fabs fn */
2080 { 0xf06d, SETSF1 | USESF1 }, /* fsqrt fn */
2081 { 0xf07d, SETSSP | USESF1 }, /* ftst/nan fn */
2082 { 0xf08d, SETSF1 }, /* fldi0 fn */
2083 { 0xf09d, SETSF1 } /* fldi1 fn */
2084 };
2085
2086 static const struct sh_minor_opcode sh_opcodef[] =
2087 {
2088 { MAP (sh_opcodef0), 0xf00f },
2089 { MAP (sh_opcodef1), 0xf0ff }
2090 };
2091
2092 #ifndef COFF_IMAGE_WITH_PE
2093 static struct sh_major_opcode sh_opcodes[] =
2094 {
2095 { MAP (sh_opcode0) },
2096 { MAP (sh_opcode1) },
2097 { MAP (sh_opcode2) },
2098 { MAP (sh_opcode3) },
2099 { MAP (sh_opcode4) },
2100 { MAP (sh_opcode5) },
2101 { MAP (sh_opcode6) },
2102 { MAP (sh_opcode7) },
2103 { MAP (sh_opcode8) },
2104 { MAP (sh_opcode9) },
2105 { MAP (sh_opcodea) },
2106 { MAP (sh_opcodeb) },
2107 { MAP (sh_opcodec) },
2108 { MAP (sh_opcoded) },
2109 { MAP (sh_opcodee) },
2110 { MAP (sh_opcodef) }
2111 };
2112 #endif
2113
2114 /* The double data transfer / parallel processing insns are not
2115 described here. This will cause sh_align_load_span to leave them alone. */
2116
2117 static const struct sh_opcode sh_dsp_opcodef0[] =
2118 {
2119 { 0xf400, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @-as,ds */
2120 { 0xf401, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@-as */
2121 { 0xf404, USESAS | LOAD | SETSSP }, /* movs.x @as,ds */
2122 { 0xf405, USESAS | STORE | USESSP }, /* movs.x ds,@as */
2123 { 0xf408, USESAS | SETSAS | LOAD | SETSSP }, /* movs.x @as+,ds */
2124 { 0xf409, USESAS | SETSAS | STORE | USESSP }, /* movs.x ds,@as+ */
2125 { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 }, /* movs.x @as+r8,ds */
2126 { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 } /* movs.x ds,@as+r8 */
2127 };
2128
2129 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2130 {
2131 { MAP (sh_dsp_opcodef0), 0xfc0d }
2132 };
2133
2134 #ifndef COFF_IMAGE_WITH_PE
2135 /* Given an instruction, return a pointer to the corresponding
2136 sh_opcode structure. Return NULL if the instruction is not
2137 recognized. */
2138
2139 static const struct sh_opcode *
2140 sh_insn_info (insn)
2141 unsigned int insn;
2142 {
2143 const struct sh_major_opcode *maj;
2144 const struct sh_minor_opcode *min, *minend;
2145
2146 maj = &sh_opcodes[(insn & 0xf000) >> 12];
2147 min = maj->minor_opcodes;
2148 minend = min + maj->count;
2149 for (; min < minend; min++)
2150 {
2151 unsigned int l;
2152 const struct sh_opcode *op, *opend;
2153
2154 l = insn & min->mask;
2155 op = min->opcodes;
2156 opend = op + min->count;
2157
2158 /* Since the opcodes tables are sorted, we could use a binary
2159 search here if the count were above some cutoff value. */
2160 for (; op < opend; op++)
2161 if (op->opcode == l)
2162 return op;
2163 }
2164
2165 return NULL;
2166 }
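
/* Illustration only -- not part of the original source.  A minimal,
   hypothetical use of sh_insn_info: 0x2008 is "tst rm,rn", so the
   lookup masks it with 0xf00f, finds the matching sh_opcode20 entry,
   and its flags show the insn is neither a load nor a store, which is
   what the alignment code below checks before trying to move it.  */
#if 0
static void
sh_insn_info_example ()
{
  const struct sh_opcode *op = sh_insn_info (0x2008);

  BFD_ASSERT (op != NULL && (op->flags & (LOAD | STORE)) == 0);
}
#endif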
2167
2168 /* See whether an instruction uses or sets a general purpose register. */
2169
2170 static bfd_boolean
2171 sh_insn_uses_or_sets_reg (insn, op, reg)
2172 unsigned int insn;
2173 const struct sh_opcode *op;
2174 unsigned int reg;
2175 {
2176 if (sh_insn_uses_reg (insn, op, reg))
2177 return TRUE;
2178
2179 return sh_insn_sets_reg (insn, op, reg);
2180 }
2181
2182 /* See whether an instruction uses a general purpose register. */
2183
2184 static bfd_boolean
2185 sh_insn_uses_reg (insn, op, reg)
2186 unsigned int insn;
2187 const struct sh_opcode *op;
2188 unsigned int reg;
2189 {
2190 unsigned int f;
2191
2192 f = op->flags;
2193
2194 if ((f & USES1) != 0
2195 && USES1_REG (insn) == reg)
2196 return TRUE;
2197 if ((f & USES2) != 0
2198 && USES2_REG (insn) == reg)
2199 return TRUE;
2200 if ((f & USESR0) != 0
2201 && reg == 0)
2202 return TRUE;
2203 if ((f & USESAS) && reg == USESAS_REG (insn))
2204 return TRUE;
2205 if ((f & USESR8) && reg == 8)
2206 return TRUE;
2207
2208 return FALSE;
2209 }
2210
2211 /* See whether an instruction sets a general purpose register. */
2212
2213 static bfd_boolean
2214 sh_insn_sets_reg (insn, op, reg)
2215 unsigned int insn;
2216 const struct sh_opcode *op;
2217 unsigned int reg;
2218 {
2219 unsigned int f;
2220
2221 f = op->flags;
2222
2223 if ((f & SETS1) != 0
2224 && SETS1_REG (insn) == reg)
2225 return TRUE;
2226 if ((f & SETS2) != 0
2227 && SETS2_REG (insn) == reg)
2228 return TRUE;
2229 if ((f & SETSR0) != 0
2230 && reg == 0)
2231 return TRUE;
2232 if ((f & SETSAS) && reg == SETSAS_REG (insn))
2233 return TRUE;
2234
2235 return FALSE;
2236 }
2237
2238 /* See whether an instruction uses or sets a floating point register. */
2239
2240 static bfd_boolean
2241 sh_insn_uses_or_sets_freg (insn, op, reg)
2242 unsigned int insn;
2243 const struct sh_opcode *op;
2244 unsigned int reg;
2245 {
2246 if (sh_insn_uses_freg (insn, op, reg))
2247 return TRUE;
2248
2249 return sh_insn_sets_freg (insn, op, reg);
2250 }
2251
2252 /* See whether an instruction uses a floating point register. */
2253
2254 static bfd_boolean
2255 sh_insn_uses_freg (insn, op, freg)
2256 unsigned int insn;
2257 const struct sh_opcode *op;
2258 unsigned int freg;
2259 {
2260 unsigned int f;
2261
2262 f = op->flags;
2263
2264 /* We can't tell if this is a double-precision insn, so just play safe
2265 and assume that it might be. This means we must not only test FREG
2266 against itself, but also an even FREG against FREG+1 (in case the
2267 using insn uses just the low part of a double precision value) and
2268 an odd FREG against FREG-1 (in case the setting insn sets just the
2269 low part of a double precision value).
2270 What this all boils down to is that we have to ignore the lowest
2271 bit of the register number. */
2272
2273 if ((f & USESF1) != 0
2274 && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2275 return TRUE;
2276 if ((f & USESF2) != 0
2277 && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2278 return TRUE;
2279 if ((f & USESF0) != 0
2280 && freg == 0)
2281 return TRUE;
2282
2283 return FALSE;
2284 }
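
/* Illustration only -- not part of the original source.  "fmov fr2,fr3"
   (0xf32c) nominally reads only fr2, but because these tables cannot
   tell single precision from double precision, sh_insn_uses_freg also
   reports fr3 as possibly used (fr2/fr3 could be the two halves of a
   double precision value), per the comment above.  */
#if 0
static void
sh_insn_uses_freg_example ()
{
  unsigned int insn = 0xf32c;

  BFD_ASSERT (sh_insn_uses_freg (insn, sh_insn_info (insn), 3));
}
#endif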
2285
2286 /* See whether an instruction sets a floating point register. */
2287
2288 static bfd_boolean
2289 sh_insn_sets_freg (insn, op, freg)
2290 unsigned int insn;
2291 const struct sh_opcode *op;
2292 unsigned int freg;
2293 {
2294 unsigned int f;
2295
2296 f = op->flags;
2297
2298 /* We can't tell if this is a double-precision insn, so just play safe
2299 and assume that it might be. This means we must not only test FREG
2300 against itself, but also an even FREG against FREG+1 (in case the
2301 using insn uses just the low part of a double precision value) and
2302 an odd FREG against FREG-1 (in case the setting insn sets just the
2303 low part of a double precision value).
2304 What this all boils down to is that we have to ignore the lowest
2305 bit of the register number. */
2306
2307 if ((f & SETSF1) != 0
2308 && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2309 return TRUE;
2310
2311 return FALSE;
2312 }
2313
2314 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2315 before I2. OP1 and OP2 are the corresponding sh_opcode structures.
2316 This should return TRUE if there is a conflict, or FALSE if the
2317 instructions can be swapped safely. */
2318
2319 static bfd_boolean
2320 sh_insns_conflict (i1, op1, i2, op2)
2321 unsigned int i1;
2322 const struct sh_opcode *op1;
2323 unsigned int i2;
2324 const struct sh_opcode *op2;
2325 {
2326 unsigned int f1, f2;
2327
2328 f1 = op1->flags;
2329 f2 = op2->flags;
2330
2331 /* Load of fpscr conflicts with floating point operations.
2332 FIXME: shouldn't test raw opcodes here. */
2333 if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2334 || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2335 return TRUE;
2336
2337 if ((f1 & (BRANCH | DELAY)) != 0
2338 || (f2 & (BRANCH | DELAY)) != 0)
2339 return TRUE;
2340
2341 if (((f1 | f2) & SETSSP)
2342 && (f1 & (SETSSP | USESSP))
2343 && (f2 & (SETSSP | USESSP)))
2344 return TRUE;
2345
2346 if ((f1 & SETS1) != 0
2347 && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2348 return TRUE;
2349 if ((f1 & SETS2) != 0
2350 && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2351 return TRUE;
2352 if ((f1 & SETSR0) != 0
2353 && sh_insn_uses_or_sets_reg (i2, op2, 0))
2354 return TRUE;
2355 if ((f1 & SETSAS)
2356 && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2357 return TRUE;
2358 if ((f1 & SETSF1) != 0
2359 && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2360 return TRUE;
2361
2362 if ((f2 & SETS1) != 0
2363 && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2364 return TRUE;
2365 if ((f2 & SETS2) != 0
2366 && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2367 return TRUE;
2368 if ((f2 & SETSR0) != 0
2369 && sh_insn_uses_or_sets_reg (i1, op1, 0))
2370 return TRUE;
2371 if ((f2 & SETSAS)
2372 && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2373 return TRUE;
2374 if ((f2 & SETSF1) != 0
2375 && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2376 return TRUE;
2377
2378 /* The instructions do not conflict. */
2379 return FALSE;
2380 }
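
/* Illustration only -- not part of the original source.  "mov r0,r5"
   (0x6503) sets r5 and "add #1,r5" (0x7501) uses it, so the two
   instructions conflict and must not be swapped past each other.  */
#if 0
static void
sh_insns_conflict_example ()
{
  unsigned int i1 = 0x6503, i2 = 0x7501;

  BFD_ASSERT (sh_insns_conflict (i1, sh_insn_info (i1),
                                 i2, sh_insn_info (i2)));
}
#endif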
2381
2382 /* I1 is a load instruction, and I2 is some other instruction. Return
2383 TRUE if I1 loads a register which I2 uses. */
2384
2385 static bfd_boolean
2386 sh_load_use (i1, op1, i2, op2)
2387 unsigned int i1;
2388 const struct sh_opcode *op1;
2389 unsigned int i2;
2390 const struct sh_opcode *op2;
2391 {
2392 unsigned int f1;
2393
2394 f1 = op1->flags;
2395
2396 if ((f1 & LOAD) == 0)
2397 return FALSE;
2398
2399 /* If both SETS1 and SETSSP are set, that means a load to a special
2400 register using postincrement addressing mode, which we don't care
2401 about here. */
2402 if ((f1 & SETS1) != 0
2403 && (f1 & SETSSP) == 0
2404 && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2405 return TRUE;
2406
2407 if ((f1 & SETSR0) != 0
2408 && sh_insn_uses_reg (i2, op2, 0))
2409 return TRUE;
2410
2411 if ((f1 & SETSF1) != 0
2412 && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2413 return TRUE;
2414
2415 return FALSE;
2416 }
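
/* Illustration only -- not part of the original source.  "mov.l @r0,r5"
   (0x6502) loads r5, and "add #1,r5" (0x7501) uses r5, so placing the
   second instruction immediately after the first would create a
   load-use pipeline bubble; sh_load_use reports this so the swapping
   code can decide that the swap is not worthwhile.  */
#if 0
static void
sh_load_use_example ()
{
  unsigned int i1 = 0x6502, i2 = 0x7501;

  BFD_ASSERT (sh_load_use (i1, sh_insn_info (i1),
                           i2, sh_insn_info (i2)));
}
#endif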
2417
2418 /* Try to align loads and stores within a span of memory. This is
2419 called by both the ELF and the COFF sh targets. ABFD and SEC are
2420 the BFD and section we are examining. CONTENTS is the contents of
2421 the section. SWAP is the routine to call to swap two instructions.
2422 RELOCS is a pointer to the internal relocation information, to be
2423 passed to SWAP. PLABEL is a pointer to the current label in a
2424 sorted list of labels; LABEL_END is the end of the list. START and
2425 STOP are the range of memory to examine. If a swap is made,
2426 *PSWAPPED is set to TRUE. */
2427
2428 #ifdef COFF_WITH_PE
2429 static
2430 #endif
2431 bfd_boolean
2432 _bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2433 plabel, label_end, start, stop, pswapped)
2434 bfd *abfd;
2435 asection *sec;
2436 bfd_byte *contents;
2437 bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2438 PTR relocs;
2439 bfd_vma **plabel;
2440 bfd_vma *label_end;
2441 bfd_vma start;
2442 bfd_vma stop;
2443 bfd_boolean *pswapped;
2444 {
2445 int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2446 || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2447 bfd_vma i;
2448
2449 /* The SH4 has a Harvard architecture, hence aligning loads is not
2450 desirable. In fact, it is counter-productive, since it interferes
2451 with the schedules generated by the compiler. */
2452 if (abfd->arch_info->mach == bfd_mach_sh4)
2453 return TRUE;
2454
2455 /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2456 instructions. */
2457 if (dsp)
2458 {
2459 sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2460 sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
2461 }
2462
2463 /* Instructions should be aligned on 2 byte boundaries. */
2464 if ((start & 1) == 1)
2465 ++start;
2466
2467 /* Now look through the unaligned addresses. */
2468 i = start;
2469 if ((i & 2) == 0)
2470 i += 2;
2471 for (; i < stop; i += 4)
2472 {
2473 unsigned int insn;
2474 const struct sh_opcode *op;
2475 unsigned int prev_insn = 0;
2476 const struct sh_opcode *prev_op = NULL;
2477
2478 insn = bfd_get_16 (abfd, contents + i);
2479 op = sh_insn_info (insn);
2480 if (op == NULL
2481 || (op->flags & (LOAD | STORE)) == 0)
2482 continue;
2483
2484 /* This is a load or store which is not on a four byte boundary. */
2485
2486 while (*plabel < label_end && **plabel < i)
2487 ++*plabel;
2488
2489 if (i > start)
2490 {
2491 prev_insn = bfd_get_16 (abfd, contents + i - 2);
2492 /* If INSN is the field b of a parallel processing insn, it is not
2493 a load / store after all. Note that the test here might mistake
2494 the field_b of a pcopy insn for the starting code of a parallel
2495 processing insn; this might miss a swapping opportunity, but at
2496 least we're on the safe side. */
2497 if (dsp && (prev_insn & 0xfc00) == 0xf800)
2498 continue;
2499
2500 /* Check if prev_insn is actually the field b of a parallel
2501 processing insn. Again, this can give a spurious match
2502 after a pcopy. */
2503 if (dsp && i - 2 > start)
2504 {
2505 unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2506
2507 if ((pprev_insn & 0xfc00) == 0xf800)
2508 prev_op = NULL;
2509 else
2510 prev_op = sh_insn_info (prev_insn);
2511 }
2512 else
2513 prev_op = sh_insn_info (prev_insn);
2514
2515 /* If the load/store instruction is in a delay slot, we
2516 can't swap. */
2517 if (prev_op == NULL
2518 || (prev_op->flags & DELAY) != 0)
2519 continue;
2520 }
2521 if (i > start
2522 && (*plabel >= label_end || **plabel != i)
2523 && prev_op != NULL
2524 && (prev_op->flags & (LOAD | STORE)) == 0
2525 && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2526 {
2527 bfd_boolean ok;
2528
2529 /* The load/store instruction does not have a label, and
2530 there is a previous instruction; PREV_INSN is not
2531 itself a load/store instruction, and PREV_INSN and
2532 INSN do not conflict. */
2533
2534 ok = TRUE;
2535
2536 if (i >= start + 4)
2537 {
2538 unsigned int prev2_insn;
2539 const struct sh_opcode *prev2_op;
2540
2541 prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2542 prev2_op = sh_insn_info (prev2_insn);
2543
2544 /* If the instruction before PREV_INSN has a delay
2545 slot--that is, PREV_INSN is in a delay slot--we
2546 can not swap. */
2547 if (prev2_op == NULL
2548 || (prev2_op->flags & DELAY) != 0)
2549 ok = FALSE;
2550
2551 /* If the instruction before PREV_INSN is a load,
2552 and it sets a register which INSN uses, then
2553 putting INSN immediately after PREV_INSN will
2554 cause a pipeline bubble, so there is no point to
2555 making the swap. */
2556 if (ok
2557 && (prev2_op->flags & LOAD) != 0
2558 && sh_load_use (prev2_insn, prev2_op, insn, op))
2559 ok = FALSE;
2560 }
2561
2562 if (ok)
2563 {
2564 if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2565 return FALSE;
2566 *pswapped = TRUE;
2567 continue;
2568 }
2569 }
2570
2571 while (*plabel < label_end && **plabel < i + 2)
2572 ++*plabel;
2573
2574 if (i + 2 < stop
2575 && (*plabel >= label_end || **plabel != i + 2))
2576 {
2577 unsigned int next_insn;
2578 const struct sh_opcode *next_op;
2579
2580 /* There is an instruction after the load/store
2581 instruction, and it does not have a label. */
2582 next_insn = bfd_get_16 (abfd, contents + i + 2);
2583 next_op = sh_insn_info (next_insn);
2584 if (next_op != NULL
2585 && (next_op->flags & (LOAD | STORE)) == 0
2586 && ! sh_insns_conflict (insn, op, next_insn, next_op))
2587 {
2588 bfd_boolean ok;
2589
2590 /* NEXT_INSN is not itself a load/store instruction,
2591 and it does not conflict with INSN. */
2592
2593 ok = TRUE;
2594
2595 /* If PREV_INSN is a load, and it sets a register
2596 which NEXT_INSN uses, then putting NEXT_INSN
2597 immediately after PREV_INSN will cause a pipeline
2598 bubble, so there is no reason to make this swap. */
2599 if (prev_op != NULL
2600 && (prev_op->flags & LOAD) != 0
2601 && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2602 ok = FALSE;
2603
2604 /* If INSN is a load, and it sets a register which
2605 the insn after NEXT_INSN uses, then doing the
2606 swap will cause a pipeline bubble, so there is no
2607 reason to make the swap. However, if the insn
2608 after NEXT_INSN is itself a load or store
2609 instruction, then it is misaligned, so
2610 optimistically hope that it will be swapped
2611 itself, and just live with the pipeline bubble if
2612 it isn't. */
2613 if (ok
2614 && i + 4 < stop
2615 && (op->flags & LOAD) != 0)
2616 {
2617 unsigned int next2_insn;
2618 const struct sh_opcode *next2_op;
2619
2620 next2_insn = bfd_get_16 (abfd, contents + i + 4);
2621 next2_op = sh_insn_info (next2_insn);
2622 if ((next2_op->flags & (LOAD | STORE)) == 0
2623 && sh_load_use (insn, op, next2_insn, next2_op))
2624 ok = FALSE;
2625 }
2626
2627 if (ok)
2628 {
2629 if (! (*swap) (abfd, sec, relocs, contents, i))
2630 return FALSE;
2631 *pswapped = TRUE;
2632 continue;
2633 }
2634 }
2635 }
2636 }
2637
2638 return TRUE;
2639 }
2640 #endif /* not COFF_IMAGE_WITH_PE */
2641
2642 /* Look for loads and stores which we can align to four byte
2643 boundaries. See the longer comment above sh_relax_section for why
2644 this is desirable. This sets *PSWAPPED if some instruction was
2645 swapped. */
2646
2647 static bfd_boolean
2648 sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2649 bfd *abfd;
2650 asection *sec;
2651 struct internal_reloc *internal_relocs;
2652 bfd_byte *contents;
2653 bfd_boolean *pswapped;
2654 {
2655 struct internal_reloc *irel, *irelend;
2656 bfd_vma *labels = NULL;
2657 bfd_vma *label, *label_end;
2658 bfd_size_type amt;
2659
2660 *pswapped = FALSE;
2661
2662 irelend = internal_relocs + sec->reloc_count;
2663
2664 /* Get all the addresses with labels on them. */
2665 amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2666 labels = (bfd_vma *) bfd_malloc (amt);
2667 if (labels == NULL)
2668 goto error_return;
2669 label_end = labels;
2670 for (irel = internal_relocs; irel < irelend; irel++)
2671 {
2672 if (irel->r_type == R_SH_LABEL)
2673 {
2674 *label_end = irel->r_vaddr - sec->vma;
2675 ++label_end;
2676 }
2677 }
2678
2679 /* Note that the assembler currently always outputs relocs in
2680 address order. If that ever changes, this code will need to sort
2681 the label values and the relocs. */
2682
2683 label = labels;
2684
2685 for (irel = internal_relocs; irel < irelend; irel++)
2686 {
2687 bfd_vma start, stop;
2688
2689 if (irel->r_type != R_SH_CODE)
2690 continue;
2691
2692 start = irel->r_vaddr - sec->vma;
2693
2694 for (irel++; irel < irelend; irel++)
2695 if (irel->r_type == R_SH_DATA)
2696 break;
2697 if (irel < irelend)
2698 stop = irel->r_vaddr - sec->vma;
2699 else
2700 stop = sec->_cooked_size;
2701
2702 if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2703 (PTR) internal_relocs, &label,
2704 label_end, start, stop, pswapped))
2705 goto error_return;
2706 }
2707
2708 free (labels);
2709
2710 return TRUE;
2711
2712 error_return:
2713 if (labels != NULL)
2714 free (labels);
2715 return FALSE;
2716 }
2717
2718 /* Swap two SH instructions. */
2719
2720 static bfd_boolean
2721 sh_swap_insns (abfd, sec, relocs, contents, addr)
2722 bfd *abfd;
2723 asection *sec;
2724 PTR relocs;
2725 bfd_byte *contents;
2726 bfd_vma addr;
2727 {
2728 struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2729 unsigned short i1, i2;
2730 struct internal_reloc *irel, *irelend;
2731
2732 /* Swap the instructions themselves. */
2733 i1 = bfd_get_16 (abfd, contents + addr);
2734 i2 = bfd_get_16 (abfd, contents + addr + 2);
2735 bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2736 bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2737
2738 /* Adjust all reloc addresses. */
2739 irelend = internal_relocs + sec->reloc_count;
2740 for (irel = internal_relocs; irel < irelend; irel++)
2741 {
2742 int type, add;
2743
2744 /* There are a few special types of relocs that we don't want to
2745 adjust. These relocs do not apply to the instruction itself,
2746 but are only associated with the address. */
2747 type = irel->r_type;
2748 if (type == R_SH_ALIGN
2749 || type == R_SH_CODE
2750 || type == R_SH_DATA
2751 || type == R_SH_LABEL)
2752 continue;
2753
2754 /* If an R_SH_USES reloc points to one of the addresses being
2755 swapped, we must adjust it. It would be incorrect to do this
2756 for a jump, though, since we want to execute both
2757 instructions after the jump. (We have avoided swapping
2758 around a label, so the jump will not wind up executing an
2759 instruction it shouldn't). */
2760 if (type == R_SH_USES)
2761 {
2762 bfd_vma off;
2763
2764 off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2765 if (off == addr)
2766 irel->r_offset += 2;
2767 else if (off == addr + 2)
2768 irel->r_offset -= 2;
2769 }
2770
2771 if (irel->r_vaddr - sec->vma == addr)
2772 {
2773 irel->r_vaddr += 2;
2774 add = -2;
2775 }
2776 else if (irel->r_vaddr - sec->vma == addr + 2)
2777 {
2778 irel->r_vaddr -= 2;
2779 add = 2;
2780 }
2781 else
2782 add = 0;
2783
2784 if (add != 0)
2785 {
2786 bfd_byte *loc;
2787 unsigned short insn, oinsn;
2788 bfd_boolean overflow;
2789
2790 loc = contents + irel->r_vaddr - sec->vma;
2791 overflow = FALSE;
2792 switch (type)
2793 {
2794 default:
2795 break;
2796
2797 case R_SH_PCDISP8BY2:
2798 case R_SH_PCRELIMM8BY2:
2799 insn = bfd_get_16 (abfd, loc);
2800 oinsn = insn;
2801 insn += add / 2;
2802 if ((oinsn & 0xff00) != (insn & 0xff00))
2803 overflow = TRUE;
2804 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2805 break;
2806
2807 case R_SH_PCDISP:
2808 insn = bfd_get_16 (abfd, loc);
2809 oinsn = insn;
2810 insn += add / 2;
2811 if ((oinsn & 0xf000) != (insn & 0xf000))
2812 overflow = TRUE;
2813 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2814 break;
2815
2816 case R_SH_PCRELIMM8BY4:
2817 /* This reloc masks out the two least significant bits of
2818 the program counter before adding in the offset.
2819 This means that if ADDR is four byte aligned, the swap
2820 does not affect the offset. If ADDR is not four byte
2821 aligned, the two swapped instructions straddle a four
2822 byte boundary, and the offset must be adjusted. */
2823 if ((addr & 3) != 0)
2824 {
2825 insn = bfd_get_16 (abfd, loc);
2826 oinsn = insn;
2827 insn += add / 2;
2828 if ((oinsn & 0xff00) != (insn & 0xff00))
2829 overflow = TRUE;
2830 bfd_put_16 (abfd, (bfd_vma) insn, loc);
2831 }
2832
2833 break;
2834 }
2835
2836 if (overflow)
2837 {
2838 ((*_bfd_error_handler)
2839 ("%s: 0x%lx: fatal: reloc overflow while relaxing",
2840 bfd_archive_filename (abfd), (unsigned long) irel->r_vaddr));
2841 bfd_set_error (bfd_error_bad_value);
2842 return FALSE;
2843 }
2844 }
2845 }
2846
2847 return TRUE;
2848 }
2849 \f
2850 /* This is a modification of _bfd_coff_generic_relocate_section, which
2851 will handle SH relaxing. */
2852
2853 static bfd_boolean
2854 sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2855 relocs, syms, sections)
2856 bfd *output_bfd ATTRIBUTE_UNUSED;
2857 struct bfd_link_info *info;
2858 bfd *input_bfd;
2859 asection *input_section;
2860 bfd_byte *contents;
2861 struct internal_reloc *relocs;
2862 struct internal_syment *syms;
2863 asection **sections;
2864 {
2865 struct internal_reloc *rel;
2866 struct internal_reloc *relend;
2867
2868 rel = relocs;
2869 relend = rel + input_section->reloc_count;
2870 for (; rel < relend; rel++)
2871 {
2872 long symndx;
2873 struct coff_link_hash_entry *h;
2874 struct internal_syment *sym;
2875 bfd_vma addend;
2876 bfd_vma val;
2877 reloc_howto_type *howto;
2878 bfd_reloc_status_type rstat;
2879
2880 /* Almost all relocs have to do with relaxing. If any work must
2881 be done for them, it has been done in sh_relax_section. */
2882 if (rel->r_type != R_SH_IMM32
2883 #ifdef COFF_WITH_PE
2884 && rel->r_type != R_SH_IMM32CE
2885 && rel->r_type != R_SH_IMAGEBASE
2886 #endif
2887 && rel->r_type != R_SH_PCDISP)
2888 continue;
2889
2890 symndx = rel->r_symndx;
2891
2892 if (symndx == -1)
2893 {
2894 h = NULL;
2895 sym = NULL;
2896 }
2897 else
2898 {
2899 if (symndx < 0
2900 || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2901 {
2902 (*_bfd_error_handler)
2903 ("%s: illegal symbol index %ld in relocs",
2904 bfd_archive_filename (input_bfd), symndx);
2905 bfd_set_error (bfd_error_bad_value);
2906 return FALSE;
2907 }
2908 h = obj_coff_sym_hashes (input_bfd)[symndx];
2909 sym = syms + symndx;
2910 }
2911
2912 if (sym != NULL && sym->n_scnum != 0)
2913 addend = - sym->n_value;
2914 else
2915 addend = 0;
2916
2917 if (rel->r_type == R_SH_PCDISP)
2918 addend -= 4;
2919
2920 if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2921 howto = NULL;
2922 else
2923 howto = &sh_coff_howtos[rel->r_type];
2924
2925 if (howto == NULL)
2926 {
2927 bfd_set_error (bfd_error_bad_value);
2928 return FALSE;
2929 }
2930
2931 #ifdef COFF_WITH_PE
2932 if (rel->r_type == R_SH_IMAGEBASE)
2933 addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2934 #endif
2935
2936 val = 0;
2937
2938 if (h == NULL)
2939 {
2940 asection *sec;
2941
2942 /* There is nothing to do for an internal PCDISP reloc. */
2943 if (rel->r_type == R_SH_PCDISP)
2944 continue;
2945
2946 if (symndx == -1)
2947 {
2948 sec = bfd_abs_section_ptr;
2949 val = 0;
2950 }
2951 else
2952 {
2953 sec = sections[symndx];
2954 val = (sec->output_section->vma
2955 + sec->output_offset
2956 + sym->n_value
2957 - sec->vma);
2958 }
2959 }
2960 else
2961 {
2962 if (h->root.type == bfd_link_hash_defined
2963 || h->root.type == bfd_link_hash_defweak)
2964 {
2965 asection *sec;
2966
2967 sec = h->root.u.def.section;
2968 val = (h->root.u.def.value
2969 + sec->output_section->vma
2970 + sec->output_offset);
2971 }
2972 else if (! info->relocateable)
2973 {
2974 if (! ((*info->callbacks->undefined_symbol)
2975 (info, h->root.root.string, input_bfd, input_section,
2976 rel->r_vaddr - input_section->vma, TRUE)))
2977 return FALSE;
2978 }
2979 }
2980
2981 rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2982 contents,
2983 rel->r_vaddr - input_section->vma,
2984 val, addend);
2985
2986 switch (rstat)
2987 {
2988 default:
2989 abort ();
2990 case bfd_reloc_ok:
2991 break;
2992 case bfd_reloc_overflow:
2993 {
2994 const char *name;
2995 char buf[SYMNMLEN + 1];
2996
2997 if (symndx == -1)
2998 name = "*ABS*";
2999 else if (h != NULL)
3000 name = h->root.root.string;
3001 else if (sym->_n._n_n._n_zeroes == 0
3002 && sym->_n._n_n._n_offset != 0)
3003 name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
3004 else
3005 {
3006 strncpy (buf, sym->_n._n_name, SYMNMLEN);
3007 buf[SYMNMLEN] = '\0';
3008 name = buf;
3009 }
3010
3011 if (! ((*info->callbacks->reloc_overflow)
3012 (info, name, howto->name, (bfd_vma) 0, input_bfd,
3013 input_section, rel->r_vaddr - input_section->vma)))
3014 return FALSE;
3015 }
3016 }
3017 }
3018
3019 return TRUE;
3020 }
3021
3022 /* This is a version of bfd_generic_get_relocated_section_contents
3023 which uses sh_relocate_section. */
3024
3025 static bfd_byte *
3026 sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
3027 data, relocateable, symbols)
3028 bfd *output_bfd;
3029 struct bfd_link_info *link_info;
3030 struct bfd_link_order *link_order;
3031 bfd_byte *data;
3032 bfd_boolean relocateable;
3033 asymbol **symbols;
3034 {
3035 asection *input_section = link_order->u.indirect.section;
3036 bfd *input_bfd = input_section->owner;
3037 asection **sections = NULL;
3038 struct internal_reloc *internal_relocs = NULL;
3039 struct internal_syment *internal_syms = NULL;
3040
3041 /* We only need to handle the case of relaxing, or of having a
3042 particular set of section contents, specially. */
3043 if (relocateable
3044 || coff_section_data (input_bfd, input_section) == NULL
3045 || coff_section_data (input_bfd, input_section)->contents == NULL)
3046 return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
3047 link_order, data,
3048 relocateable,
3049 symbols);
3050
3051 memcpy (data, coff_section_data (input_bfd, input_section)->contents,
3052 (size_t) input_section->_raw_size);
3053
3054 if ((input_section->flags & SEC_RELOC) != 0
3055 && input_section->reloc_count > 0)
3056 {
3057 bfd_size_type symesz = bfd_coff_symesz (input_bfd);
3058 bfd_byte *esym, *esymend;
3059 struct internal_syment *isymp;
3060 asection **secpp;
3061 bfd_size_type amt;
3062
3063 if (! _bfd_coff_get_external_symbols (input_bfd))
3064 goto error_return;
3065
3066 internal_relocs = (_bfd_coff_read_internal_relocs
3067 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
3068 FALSE, (struct internal_reloc *) NULL));
3069 if (internal_relocs == NULL)
3070 goto error_return;
3071
3072 amt = obj_raw_syment_count (input_bfd);
3073 amt *= sizeof (struct internal_syment);
3074 internal_syms = (struct internal_syment *) bfd_malloc (amt);
3075 if (internal_syms == NULL)
3076 goto error_return;
3077
3078 amt = obj_raw_syment_count (input_bfd);
3079 amt *= sizeof (asection *);
3080 sections = (asection **) bfd_malloc (amt);
3081 if (sections == NULL)
3082 goto error_return;
3083
3084 isymp = internal_syms;
3085 secpp = sections;
3086 esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
3087 esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
3088 while (esym < esymend)
3089 {
3090 bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
3091
3092 if (isymp->n_scnum != 0)
3093 *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
3094 else
3095 {
3096 if (isymp->n_value == 0)
3097 *secpp = bfd_und_section_ptr;
3098 else
3099 *secpp = bfd_com_section_ptr;
3100 }
3101
3102 esym += (isymp->n_numaux + 1) * symesz;
3103 secpp += isymp->n_numaux + 1;
3104 isymp += isymp->n_numaux + 1;
3105 }
3106
3107 if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3108 input_section, data, internal_relocs,
3109 internal_syms, sections))
3110 goto error_return;
3111
3112 free (sections);
3113 sections = NULL;
3114 free (internal_syms);
3115 internal_syms = NULL;
3116 free (internal_relocs);
3117 internal_relocs = NULL;
3118 }
3119
3120 return data;
3121
3122 error_return:
3123 if (internal_relocs != NULL)
3124 free (internal_relocs);
3125 if (internal_syms != NULL)
3126 free (internal_syms);
3127 if (sections != NULL)
3128 free (sections);
3129 return NULL;
3130 }
3131
3132 /* The target vectors. */
3133
3134 #ifndef TARGET_SHL_SYM
3135 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL)
3136 #endif
3137
3138 #ifdef TARGET_SHL_SYM
3139 #define TARGET_SYM TARGET_SHL_SYM
3140 #else
3141 #define TARGET_SYM shlcoff_vec
3142 #endif
3143
3144 #ifndef TARGET_SHL_NAME
3145 #define TARGET_SHL_NAME "coff-shl"
3146 #endif
3147
3148 #ifdef COFF_WITH_PE
3149 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3150 SEC_CODE | SEC_DATA, '_', NULL);
3151 #else
3152 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3153 0, '_', NULL)
3154 #endif
3155
3156 #ifndef TARGET_SHL_SYM
3157 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3158 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3159 /* Some people want versions of the SH COFF target which do not align
3160 to 16 byte boundaries. We implement that by adding a couple of new
3161 target vectors. These are just like the ones above, but they
3162 change the default section alignment. To generate them in the
3163 assembler, use -small. To use them in the linker, use -b
3164 coff-sh{l}-small and -oformat coff-sh{l}-small.
3165
3166 Yes, this is a horrible hack. A general solution for setting
3167 section alignment in COFF is rather complex. ELF handles this
3168 correctly. */
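
/* For example (illustrative only; the file names are hypothetical):

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o  */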
3169
3170 /* Only recognize the small versions if the target was not defaulted.
3171 Otherwise we won't recognize the non default endianness. */
3172
3173 static const bfd_target *
3174 coff_small_object_p (abfd)
3175 bfd *abfd;
3176 {
3177 if (abfd->target_defaulted)
3178 {
3179 bfd_set_error (bfd_error_wrong_format);
3180 return NULL;
3181 }
3182 return coff_object_p (abfd);
3183 }
3184
3185 /* Set the section alignment for the small versions. */
3186
3187 static bfd_boolean
3188 coff_small_new_section_hook (abfd, section)
3189 bfd *abfd;
3190 asection *section;
3191 {
3192 if (! coff_new_section_hook (abfd, section))
3193 return FALSE;
3194
3195 /* We must align to at least a four byte boundary, because longword
3196 accesses must be on a four byte boundary. */
3197 if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3198 section->alignment_power = 2;
3199
3200 return TRUE;
3201 }
3202
3203 /* This is copied from bfd_coff_std_swap_table so that we can change
3204 the default section alignment power. */
3205
3206 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3207 {
3208 coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3209 coff_swap_aux_out, coff_swap_sym_out,
3210 coff_swap_lineno_out, coff_swap_reloc_out,
3211 coff_swap_filehdr_out, coff_swap_aouthdr_out,
3212 coff_swap_scnhdr_out,
3213 FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3214 #ifdef COFF_LONG_FILENAMES
3215 TRUE,
3216 #else
3217 FALSE,
3218 #endif
3219 #ifdef COFF_LONG_SECTION_NAMES
3220 TRUE,
3221 #else
3222 FALSE,
3223 #endif
3224 2,
3225 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3226 TRUE,
3227 #else
3228 FALSE,
3229 #endif
3230 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3231 4,
3232 #else
3233 2,
3234 #endif
3235 coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3236 coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3237 coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3238 coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3239 coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3240 coff_classify_symbol, coff_compute_section_file_positions,
3241 coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3242 coff_adjust_symndx, coff_link_add_one_symbol,
3243 coff_link_output_has_begun, coff_final_link_postscript
3244 };
3245
3246 #define coff_small_close_and_cleanup \
3247 coff_close_and_cleanup
3248 #define coff_small_bfd_free_cached_info \
3249 coff_bfd_free_cached_info
3250 #define coff_small_get_section_contents \
3251 coff_get_section_contents
3252 #define coff_small_get_section_contents_in_window \
3253 coff_get_section_contents_in_window
3254
3255 extern const bfd_target shlcoff_small_vec;
3256
3257 const bfd_target shcoff_small_vec =
3258 {
3259 "coff-sh-small", /* name */
3260 bfd_target_coff_flavour,
3261 BFD_ENDIAN_BIG, /* data byte order is big */
3262 BFD_ENDIAN_BIG, /* header byte order is big */
3263
3264 (HAS_RELOC | EXEC_P | /* object flags */
3265 HAS_LINENO | HAS_DEBUG |
3266 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3267
3268 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3269 '_', /* leading symbol underscore */
3270 '/', /* ar_pad_char */
3271 15, /* ar_max_namelen */
3272 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3273 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3274 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3275 bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3276 bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3277 bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3278
3279 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3280 bfd_generic_archive_p, _bfd_dummy_target},
3281 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3282 bfd_false},
3283 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3284 _bfd_write_archive_contents, bfd_false},
3285
3286 BFD_JUMP_TABLE_GENERIC (coff_small),
3287 BFD_JUMP_TABLE_COPY (coff),
3288 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3289 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3290 BFD_JUMP_TABLE_SYMBOLS (coff),
3291 BFD_JUMP_TABLE_RELOCS (coff),
3292 BFD_JUMP_TABLE_WRITE (coff),
3293 BFD_JUMP_TABLE_LINK (coff),
3294 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3295
3296 & shlcoff_small_vec,
3297
3298 (PTR) &bfd_coff_small_swap_table
3299 };
3300
3301 const bfd_target shlcoff_small_vec =
3302 {
3303 "coff-shl-small", /* name */
3304 bfd_target_coff_flavour,
3305 BFD_ENDIAN_LITTLE, /* data byte order is little */
3306 BFD_ENDIAN_LITTLE, /* header byte order is little endian too */
3307
3308 (HAS_RELOC | EXEC_P | /* object flags */
3309 HAS_LINENO | HAS_DEBUG |
3310 HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3311
3312 (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3313 '_', /* leading symbol underscore */
3314 '/', /* ar_pad_char */
3315 15, /* ar_max_namelen */
3316 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3317 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3318 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3319 bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3320 bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3321 bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3322
3323 {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3324 bfd_generic_archive_p, _bfd_dummy_target},
3325 {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3326 bfd_false},
3327 {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3328 _bfd_write_archive_contents, bfd_false},
3329
3330 BFD_JUMP_TABLE_GENERIC (coff_small),
3331 BFD_JUMP_TABLE_COPY (coff),
3332 BFD_JUMP_TABLE_CORE (_bfd_nocore),
3333 BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3334 BFD_JUMP_TABLE_SYMBOLS (coff),
3335 BFD_JUMP_TABLE_RELOCS (coff),
3336 BFD_JUMP_TABLE_WRITE (coff),
3337 BFD_JUMP_TABLE_LINK (coff),
3338 BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3339
3340 & shcoff_small_vec,
3341
3342 (PTR) &bfd_coff_small_swap_table
3343 };
3344 #endif