1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <string.h>
29 #include <limits.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33
34 /* Need TARGET_CPU. */
35 #include "config.h"
36 #include "subsegs.h"
37 #include "obstack.h"
38 #include "symbols.h"
39 #include "listing.h"
40
41 #include "opcode/arm.h"
42
43 #ifdef OBJ_ELF
44 #include "elf/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
47 #endif
48
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
51
52 #ifdef OBJ_ELF
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
55
56 /* This structure holds the unwinding state. */
57
58 static struct
59 {
60 symbolS * proc_start;
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
65 segT saved_seg;
66 subsegT saved_subseg;
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
69 int opcode_count;
70 int opcode_alloc;
71 /* The number of bytes pushed to the stack. */
72 offsetT frame_size;
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
79 offsetT fp_offset;
80 int fp_reg;
81 /* Nonzero if an unwind_setfp directive has been seen. */
82 unsigned fp_used:1;
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
85 } unwind;
86
87 /* If bit N is set, an R_ARM_NONE relocation has already been output for
88 __aeabi_unwind_cpp_prN. This lets the dependency be emitted only once per
89 section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency = 0;
91
92 #endif /* OBJ_ELF */
93
94 enum arm_float_abi
95 {
96 ARM_FLOAT_ABI_HARD,
97 ARM_FLOAT_ABI_SOFTFP,
98 ARM_FLOAT_ABI_SOFT
99 };
100
101 /* Types of processor to assemble for. */
102 #ifndef CPU_DEFAULT
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
105 #else
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
108 #endif
109 #endif
110 #endif
111
112 #ifndef FPU_DEFAULT
113 # ifdef TE_LINUX
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
116 # ifdef OBJ_ELF
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
118 # else
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
121 # endif
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
124 # else
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
127 # endif
128 #endif /* ifndef FPU_DEFAULT */
129
130 #define streq(a, b) (strcmp (a, b) == 0)
131
132 static arm_feature_set cpu_variant;
133 static arm_feature_set arm_arch_used;
134 static arm_feature_set thumb_arch_used;
135
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26 = FALSE;
138 static int atpcs = FALSE;
139 static int support_interwork = FALSE;
140 static int uses_apcs_float = FALSE;
141 static int pic_code = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default = FPU_DEFAULT;
157 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
165
166 #ifdef CPU_DEFAULT
167 static const arm_feature_set cpu_default = CPU_DEFAULT;
168 #endif
169
170 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
172 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178 static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
194
195 static const arm_feature_set arm_arch_any = ARM_ANY;
196 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
199
200 static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202 static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204 static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208 static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
216
217 static int mfloat_abi_opt = -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name[16];
222 #ifdef OBJ_ELF
223 # ifdef EABI_DEFAULT
224 static int meabi_flags = EABI_DEFAULT;
225 # else
226 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
227 # endif
228 #endif
229
230 #ifdef OBJ_ELF
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS * GOT_symbol;
233 #endif
234
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
238 instructions. */
239 static int thumb_mode = 0;
240
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
243
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
248 there.)
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
251 machine code.
252
253 Important differences from the old Thumb mode:
254
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
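/* For example, the conditional "add with flags" that divided ARM syntax
   spells "addeqs r0, r1, r2" is written "addseq r0, r1, r2" in unified
   syntax, with the condition as the final affix. */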
263
264 static bfd_boolean unified_syntax = FALSE;
265
266 enum neon_el_type
267 {
268 NT_invtype,
269 NT_untyped,
270 NT_integer,
271 NT_float,
272 NT_poly,
273 NT_signed,
274 NT_unsigned
275 };
276
277 struct neon_type_el
278 {
279 enum neon_el_type type;
280 unsigned size;
281 };
282
283 #define NEON_MAX_TYPE_ELS 4
284
285 struct neon_type
286 {
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
288 unsigned elems;
289 };
290
291 struct arm_it
292 {
293 const char * error;
294 unsigned long instruction;
295 int size;
296 int size_req;
297 int cond;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
301 unsigned long relax;
302 struct
303 {
304 bfd_reloc_code_real_type type;
305 expressionS exp;
306 int pc_rel;
307 } reloc;
308
309 struct
310 {
311 unsigned reg;
312 signed int imm;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
330 } operands[6];
331 };
332
333 static struct arm_it inst;
334
335 #define NUM_FLOAT_VALS 8
336
337 const char * fp_const[] =
338 {
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
340 };
341
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
344
345 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
346
347 #define FAIL (-1)
348 #define SUCCESS (0)
349
350 #define SUFF_S 1
351 #define SUFF_D 2
352 #define SUFF_E 3
353 #define SUFF_P 4
354
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
357
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
360
361 #define DOUBLE_LOAD_FLAG 0x00000001
362
363 struct asm_cond
364 {
365 const char * template;
366 unsigned long value;
367 };
368
369 #define COND_ALWAYS 0xE
370
371 struct asm_psr
372 {
373 const char *template;
374 unsigned long field;
375 };
376
377 struct asm_barrier_opt
378 {
379 const char *template;
380 unsigned long value;
381 };
382
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
385
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
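/* For example, the field specifier in "msr CPSR_fc, r0" corresponds to
   PSR_f | PSR_c here, and "msr SPSR_fc, r0" additionally sets SPSR_BIT. */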
391
392 struct reloc_entry
393 {
394 char *name;
395 bfd_reloc_code_real_type reloc;
396 };
397
398 enum vfp_reg_pos
399 {
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
402 };
403
404 enum vfp_ldstm_type
405 {
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
407 };
408
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
412
413 struct neon_typed_alias
414 {
415 unsigned char defined;
416 unsigned char index;
417 struct neon_type_el eltype;
418 };
419
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
422 enum arm_reg_type
423 {
424 REG_TYPE_RN,
425 REG_TYPE_CP,
426 REG_TYPE_CN,
427 REG_TYPE_FN,
428 REG_TYPE_VFS,
429 REG_TYPE_VFD,
430 REG_TYPE_NQ,
431 REG_TYPE_NDQ,
432 REG_TYPE_VFC,
433 REG_TYPE_MVF,
434 REG_TYPE_MVD,
435 REG_TYPE_MVFX,
436 REG_TYPE_MVDX,
437 REG_TYPE_MVAX,
438 REG_TYPE_DSPSC,
439 REG_TYPE_MMXWR,
440 REG_TYPE_MMXWC,
441 REG_TYPE_MMXWCG,
442 REG_TYPE_XSCALE,
443 };
444
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
449 struct reg_entry
450 {
451 const char *name;
452 unsigned char number;
453 unsigned char type;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
456 };
457
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs[] =
460 {
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
480 };
481
482 /* Some well known registers that we refer to directly elsewhere. */
483 #define REG_SP 13
484 #define REG_LR 14
485 #define REG_PC 15
486
487 /* ARM instructions take 4 bytes in the object file, Thumb instructions
488 take 2: */
489 #define INSN_SIZE 4
490
491 struct asm_opcode
492 {
493 /* Basic string to match. */
494 const char *template;
495
496 /* Parameters to instruction. */
497 unsigned char operands[8];
498
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
501
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
504
505 /* Thumb-format instruction code. */
506 unsigned int tvalue;
507
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
511
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
514
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
517 };
518
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
528
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
532
533 #define DATA_OP_SHIFT 21
534
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
537
538 /* Codes to distinguish the arithmetic instructions. */
539 #define OPCODE_AND 0
540 #define OPCODE_EOR 1
541 #define OPCODE_SUB 2
542 #define OPCODE_RSB 3
543 #define OPCODE_ADD 4
544 #define OPCODE_ADC 5
545 #define OPCODE_SBC 6
546 #define OPCODE_RSC 7
547 #define OPCODE_TST 8
548 #define OPCODE_TEQ 9
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
555
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
566
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
572
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
584
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
592
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
598
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
614
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
617
618 #define T_OPCODE_BRANCH 0xe000
619
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
624
625 #define BAD_ARGS _("bad arguments to instruction")
626 #define BAD_PC _("r15 not allowed here")
627 #define BAD_COND _("instruction cannot be conditional")
628 #define BAD_OVERLAP _("registers may not be the same")
629 #define BAD_HIREG _("lo register required")
630 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
631 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
632 #define BAD_BRANCH _("branch must be last instruction in IT block")
633 #define BAD_NOT_IT _("instruction not allowed in IT block")
634
635 static struct hash_control *arm_ops_hsh;
636 static struct hash_control *arm_cond_hsh;
637 static struct hash_control *arm_shift_hsh;
638 static struct hash_control *arm_psr_hsh;
639 static struct hash_control *arm_v7m_psr_hsh;
640 static struct hash_control *arm_reg_hsh;
641 static struct hash_control *arm_reloc_hsh;
642 static struct hash_control *arm_barrier_opt_hsh;
643
644 /* Stuff needed to resolve the label ambiguity
645 As:
646 ...
647 label: <insn>
648 may differ from:
649 ...
650 label:
651 <insn>
652 */
653
654 symbolS * last_label_seen;
655 static int label_is_thumb_function_name = FALSE;
656 \f
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
659
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
662 {
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
665 unsigned int id;
666 symbolS * symbol;
667 segT section;
668 subsegT sub_section;
669 struct literal_pool * next;
670 } literal_pool;
671
672 /* Pointer to a linked list of literal pools. */
673 literal_pool * list_of_pools = NULL;
674
675 /* State variables for IT block handling. */
676 static int current_it_mask = 0;
677 static int current_cc;
678
679 \f
680 /* Pure syntax. */
681
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars[] = "@";
685
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars[] = "#";
694
695 const char line_separator_chars[] = ";";
696
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS[] = "eE";
700
701 /* Chars that mean this number is a floating point constant. */
702 /* As in 0f12.456 */
703 /* or 0d1.2345e12 */
704
705 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
706
707 /* Prefix characters that indicate the start of an immediate
708 value. */
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
710
711 /* Separator character handling. */
712
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
714
715 static inline int
716 skip_past_char (char ** str, char c)
717 {
718 if (**str == c)
719 {
720 (*str)++;
721 return SUCCESS;
722 }
723 else
724 return FAIL;
725 }
726 #define skip_past_comma(str) skip_past_char (str, ',')
727
728 /* Arithmetic expressions (possibly involving symbols). */
729
730 /* Return TRUE if anything in the expression is a bignum. */
731
732 static int
733 walk_no_bignums (symbolS * sp)
734 {
735 if (symbol_get_value_expression (sp)->X_op == O_big)
736 return 1;
737
738 if (symbol_get_value_expression (sp)->X_add_symbol)
739 {
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
743 }
744
745 return 0;
746 }
747
748 static int in_my_get_expression = 0;
749
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
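/* For example, with GE_IMM_PREFIX the operand must be written "#42" (or
   "$42"), whereas GE_OPT_PREFIX and GE_OPT_PREFIX_BIG also accept a
   bare "42". */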
757
758 static int
759 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
760 {
761 char * save_in;
762 segT seg;
763
764 /* In unified syntax, all prefixes are optional. */
765 if (unified_syntax)
766 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
767 : GE_OPT_PREFIX;
768
769 switch (prefix_mode)
770 {
771 case GE_NO_PREFIX: break;
772 case GE_IMM_PREFIX:
773 if (!is_immediate_prefix (**str))
774 {
775 inst.error = _("immediate expression requires a # prefix");
776 return FAIL;
777 }
778 (*str)++;
779 break;
780 case GE_OPT_PREFIX:
781 case GE_OPT_PREFIX_BIG:
782 if (is_immediate_prefix (**str))
783 (*str)++;
784 break;
785 default: abort ();
786 }
787
788 memset (ep, 0, sizeof (expressionS));
789
790 save_in = input_line_pointer;
791 input_line_pointer = *str;
792 in_my_get_expression = 1;
793 seg = expression (ep);
794 in_my_get_expression = 0;
795
796 if (ep->X_op == O_illegal)
797 {
798 /* We found a bad expression in md_operand(). */
799 *str = input_line_pointer;
800 input_line_pointer = save_in;
801 if (inst.error == NULL)
802 inst.error = _("bad expression");
803 return 1;
804 }
805
806 #ifdef OBJ_AOUT
807 if (seg != absolute_section
808 && seg != text_section
809 && seg != data_section
810 && seg != bss_section
811 && seg != undefined_section)
812 {
813 inst.error = _("bad segment");
814 *str = input_line_pointer;
815 input_line_pointer = save_in;
816 return 1;
817 }
818 #endif
819
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode != GE_OPT_PREFIX_BIG
824 && (ep->X_op == O_big
825 || (ep->X_add_symbol
826 && (walk_no_bignums (ep->X_add_symbol)
827 || (ep->X_op_symbol
828 && walk_no_bignums (ep->X_op_symbol))))))
829 {
830 inst.error = _("invalid constant");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
833 return 1;
834 }
835
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
838 return 0;
839 }
840
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
845
846 Note that fp constants aren't represented in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
852
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
854
855 char *
856 md_atof (int type, char * litP, int * sizeP)
857 {
858 int prec;
859 LITTLENUM_TYPE words[MAX_LITTLENUMS];
860 char *t;
861 int i;
862
863 switch (type)
864 {
865 case 'f':
866 case 'F':
867 case 's':
868 case 'S':
869 prec = 2;
870 break;
871
872 case 'd':
873 case 'D':
874 case 'r':
875 case 'R':
876 prec = 4;
877 break;
878
879 case 'x':
880 case 'X':
881 prec = 6;
882 break;
883
884 case 'p':
885 case 'P':
886 prec = 6;
887 break;
888
889 default:
890 *sizeP = 0;
891 return _("bad call to MD_ATOF()");
892 }
893
894 t = atof_ieee (input_line_pointer, type, words);
895 if (t)
896 input_line_pointer = t;
897 *sizeP = prec * 2;
898
899 if (target_big_endian)
900 {
901 for (i = 0; i < prec; i++)
902 {
903 md_number_to_chars (litP, (valueT) words[i], 2);
904 litP += 2;
905 }
906 }
907 else
908 {
909 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
910 for (i = prec - 1; i >= 0; i--)
911 {
912 md_number_to_chars (litP, (valueT) words[i], 2);
913 litP += 2;
914 }
915 else
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i = 0; i < prec; i += 2)
919 {
920 md_number_to_chars (litP, (valueT) words[i + 1], 2);
921 md_number_to_chars (litP + 2, (valueT) words[i], 2);
922 litP += 4;
923 }
924 }
925
926 return 0;
927 }
928
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
931 void
932 md_operand (expressionS * expr)
933 {
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
936 }
937
938 /* Immediate values. */
939
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
943 #ifdef OBJ_ELF
944 static int
945 immediate_for_directive (int *val)
946 {
947 expressionS exp;
948 exp.X_op = O_illegal;
949
950 if (is_immediate_prefix (*input_line_pointer))
951 {
952 input_line_pointer++;
953 expression (&exp);
954 }
955
956 if (exp.X_op != O_constant)
957 {
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
960 return FAIL;
961 }
962 *val = exp.X_add_number;
963 return SUCCESS;
964 }
965 #endif
966
967 /* Register parsing. */
968
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
973
974 static struct reg_entry *
975 arm_reg_parse_multi (char **ccp)
976 {
977 char *start = *ccp;
978 char *p;
979 struct reg_entry *reg;
980
981 #ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
983 return NULL;
984 start++;
985 #endif
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
988 start++;
989 #endif
990
991 p = start;
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
993 return NULL;
994
995 do
996 p++;
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
998
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1000
1001 if (!reg)
1002 return NULL;
1003
1004 *ccp = p;
1005 return reg;
1006 }
1007
1008 static int
1009 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1010 enum arm_reg_type type)
1011 {
1012 /* Alternative syntaxes are accepted for a few register classes. */
1013 switch (type)
1014 {
1015 case REG_TYPE_MVF:
1016 case REG_TYPE_MVD:
1017 case REG_TYPE_MVFX:
1018 case REG_TYPE_MVDX:
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg && reg->type == REG_TYPE_CN)
1021 return reg->number;
1022 break;
1023
1024 case REG_TYPE_CP:
1025 /* For backward compatibility, a bare number is valid here. */
1026 {
1027 unsigned long processor = strtoul (start, ccp, 10);
1028 if (*ccp != start && processor <= 15)
1029 return processor;
1030 }
1031
1032 case REG_TYPE_MMXWC:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg && reg->type == REG_TYPE_MMXWCG)
1036 return reg->number;
1037 break;
1038
1039 default:
1040 break;
1041 }
1042
1043 return FAIL;
1044 }
1045
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1048
1049 static int
1050 arm_reg_parse (char **ccp, enum arm_reg_type type)
1051 {
1052 char *start = *ccp;
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1054 int ret;
1055
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1058 return FAIL;
1059
1060 if (reg && reg->type == type)
1061 return reg->number;
1062
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1064 return ret;
1065
1066 *ccp = start;
1067 return FAIL;
1068 }
1069
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1072 properly. E.g.,
1073
1074 .i32.i32.s16
1075 .s32.f32
1076 .u16
1077
1078 Can all be legally parsed by this function.
1079
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1083
1084 static int
1085 parse_neon_type (struct neon_type *type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 if (type)
1090 type->elems = 0;
1091
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1093 {
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1096
1097 if (*ptr != '.')
1098 break;
1099
1100 ptr++;
1101
1102 /* Just a size without an explicit type. */
1103 if (ISDIGIT (*ptr))
1104 goto parsesize;
1105
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1113 default:
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1115 return FAIL;
1116 }
1117
1118 ptr++;
1119
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1122 thissize = 32;
1123 else
1124 {
1125 parsesize:
1126 thissize = strtoul (ptr, &ptr, 10);
1127
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1129 && thissize != 64)
1130 {
1131 as_bad (_("bad size %d in type specifier"), thissize);
1132 return FAIL;
1133 }
1134 }
1135
1136 if (type)
1137 {
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1140 type->elems++;
1141 }
1142 }
1143
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1146 return FAIL;
1147
1148 *str = ptr;
1149
1150 return SUCCESS;
1151 }
1152
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1157
1158 static void
1159 first_error (const char *err)
1160 {
1161 if (!inst.error)
1162 inst.error = err;
1163 }
1164
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1166 static int
1167 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1168 {
1169 char *str = *ccp;
1170 struct neon_type optype;
1171
1172 if (*str == '.')
1173 {
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1175 {
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1178 else
1179 {
1180 first_error (_("only one type should be specified for operand"));
1181 return FAIL;
1182 }
1183 }
1184 else
1185 {
1186 first_error (_("vector type expected"));
1187 return FAIL;
1188 }
1189 }
1190 else
1191 return FAIL;
1192
1193 *ccp = str;
1194
1195 return SUCCESS;
1196 }
1197
1198 /* Special meanings for indices (which normally have a range of 0-7); these
1199 values still fit into a 4-bit integer. */
1200
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1203
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (REG_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1208
1209 static int
1210 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1211 enum arm_reg_type *rtype,
1212 struct neon_typed_alias *typeinfo)
1213 {
1214 char *str = *ccp;
1215 struct reg_entry *reg = arm_reg_parse_multi (&str);
1216 struct neon_typed_alias atype;
1217 struct neon_type_el parsetype;
1218
1219 atype.defined = 0;
1220 atype.index = -1;
1221 atype.eltype.type = NT_invtype;
1222 atype.eltype.size = -1;
1223
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1226 if (reg == NULL)
1227 {
1228 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1229 if (altreg != FAIL)
1230 *ccp = str;
1231 if (typeinfo)
1232 *typeinfo = atype;
1233 return altreg;
1234 }
1235
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type == REG_TYPE_NDQ
1238 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1239 type = reg->type;
1240
1241 if (type != reg->type)
1242 return FAIL;
1243
1244 if (reg->neon)
1245 atype = *reg->neon;
1246
1247 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1248 {
1249 if ((atype.defined & NTA_HASTYPE) != 0)
1250 {
1251 first_error (_("can't redefine type for operand"));
1252 return FAIL;
1253 }
1254 atype.defined |= NTA_HASTYPE;
1255 atype.eltype = parsetype;
1256 }
1257
1258 if (skip_past_char (&str, '[') == SUCCESS)
1259 {
1260 if (type != REG_TYPE_VFD)
1261 {
1262 first_error (_("only D registers may be indexed"));
1263 return FAIL;
1264 }
1265
1266 if ((atype.defined & NTA_HASINDEX) != 0)
1267 {
1268 first_error (_("can't change index for operand"));
1269 return FAIL;
1270 }
1271
1272 atype.defined |= NTA_HASINDEX;
1273
1274 if (skip_past_char (&str, ']') == SUCCESS)
1275 atype.index = NEON_ALL_LANES;
1276 else
1277 {
1278 expressionS exp;
1279
1280 my_get_expression (&exp, &str, GE_NO_PREFIX);
1281
1282 if (exp.X_op != O_constant)
1283 {
1284 first_error (_("constant expression required"));
1285 return FAIL;
1286 }
1287
1288 if (skip_past_char (&str, ']') == FAIL)
1289 return FAIL;
1290
1291 atype.index = exp.X_add_number;
1292 }
1293 }
1294
1295 if (typeinfo)
1296 *typeinfo = atype;
1297
1298 if (rtype)
1299 *rtype = type;
1300
1301 *ccp = str;
1302
1303 return reg->number;
1304 }
1305
1306 /* Like arm_reg_parse, but allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1312 */
1313
1314 static int
1315 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1317 {
1318 struct neon_typed_alias atype;
1319 char *str = *ccp;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1321
1322 if (reg == FAIL)
1323 return FAIL;
1324
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1327 {
1328 first_error (_("register operand expected, but got scalar"));
1329 return FAIL;
1330 }
1331
1332 if (vectype)
1333 *vectype = atype.eltype;
1334
1335 *ccp = str;
1336
1337 return reg;
1338 }
1339
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
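/* For example, the scalar d5[2] is returned by parse_scalar below as
   (5 << 4) | 2 = 0x52; NEON_SCALAR_REG recovers 5 and NEON_SCALAR_INDEX
   recovers 2. */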
1342
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1346
1347 static int
1348 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1349 {
1350 int reg;
1351 char *str = *ccp;
1352 struct neon_typed_alias atype;
1353
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1355
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1357 return FAIL;
1358
1359 if (atype.index == NEON_ALL_LANES)
1360 {
1361 first_error (_("scalar must have an index"));
1362 return FAIL;
1363 }
1364 else if (atype.index >= 64 / elsize)
1365 {
1366 first_error (_("scalar index out of range"));
1367 return FAIL;
1368 }
1369
1370 if (type)
1371 *type = atype.eltype;
1372
1373 *ccp = str;
1374
1375 return reg * 16 + atype.index;
1376 }
1377
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
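/* For example, "{r0-r3, lr}" yields the bitmask 0x400f: bits 0-3 for
   r0-r3 and bit 14 for lr. */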
1379 static long
1380 parse_reg_list (char ** strp)
1381 {
1382 char * str = * strp;
1383 long range = 0;
1384 int another_range;
1385
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1387 do
1388 {
1389 another_range = 0;
1390
1391 if (*str == '{')
1392 {
1393 int in_range = 0;
1394 int cur_reg = -1;
1395
1396 str++;
1397 do
1398 {
1399 int reg;
1400
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1402 {
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1404 return FAIL;
1405 }
1406
1407 if (in_range)
1408 {
1409 int i;
1410
1411 if (reg <= cur_reg)
1412 {
1413 first_error (_("bad range in register list"));
1414 return FAIL;
1415 }
1416
1417 for (i = cur_reg + 1; i < reg; i++)
1418 {
1419 if (range & (1 << i))
1420 as_tsktsk
1421 (_("Warning: duplicated register (r%d) in register list"),
1422 i);
1423 else
1424 range |= 1 << i;
1425 }
1426 in_range = 0;
1427 }
1428
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1431 reg);
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1434
1435 range |= 1 << reg;
1436 cur_reg = reg;
1437 }
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1440 str--;
1441
1442 if (*str++ != '}')
1443 {
1444 first_error (_("missing `}'"));
1445 return FAIL;
1446 }
1447 }
1448 else
1449 {
1450 expressionS expr;
1451
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1453 return FAIL;
1454
1455 if (expr.X_op == O_constant)
1456 {
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1459 {
1460 inst.error = _("invalid register mask");
1461 return FAIL;
1462 }
1463
1464 if ((range & expr.X_add_number) != 0)
1465 {
1466 int regno = range & expr.X_add_number;
1467
1468 regno &= -regno;
1469 regno = (1 << regno) - 1;
1470 as_tsktsk
1471 (_("Warning: duplicated register (r%d) in register list"),
1472 regno);
1473 }
1474
1475 range |= expr.X_add_number;
1476 }
1477 else
1478 {
1479 if (inst.reloc.type != 0)
1480 {
1481 inst.error = _("expression too complex");
1482 return FAIL;
1483 }
1484
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1488 }
1489 }
1490
1491 if (*str == '|' || *str == '+')
1492 {
1493 str++;
1494 another_range = 1;
1495 }
1496 }
1497 while (another_range);
1498
1499 *strp = str;
1500 return range;
1501 }
1502
1503 /* Types of registers in a list. */
1504
1505 enum reg_list_els
1506 {
1507 REGLIST_VFP_S,
1508 REGLIST_VFP_D,
1509 REGLIST_NEON_D
1510 };
1511
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1519 some cases, e.g.:
1520 vtbl.8 d3,d4,d5
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1525 bug. */
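/* For example, "{d8-d15}" parses as 8 consecutive registers: the function
   returns 8 and stores 8 (the number of d8) in *PBASE. */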
1526
1527 static int
1528 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1529 {
1530 int base_reg;
1531 int new_base;
1532 enum arm_reg_type regtype = 0;
1533 int max_regs = 0;
1534 int count = 0;
1535 int warned = 0;
1536 unsigned long mask = 0;
1537 int i;
1538
1539 if (**str != '{')
1540 {
1541 inst.error = _("expecting {");
1542 return FAIL;
1543 }
1544
1545 (*str)++;
1546
1547 switch (etype)
1548 {
1549 case REGLIST_VFP_S:
1550 regtype = REG_TYPE_VFS;
1551 max_regs = 32;
1552 break;
1553
1554 case REGLIST_VFP_D:
1555 regtype = REG_TYPE_VFD;
1556 break;
1557
1558 case REGLIST_NEON_D:
1559 regtype = REG_TYPE_NDQ;
1560 break;
1561 }
1562
1563 if (etype != REGLIST_VFP_S)
1564 {
1565 /* VFPv3 allows 32 D registers. */
1566 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1567 {
1568 max_regs = 32;
1569 if (thumb_mode)
1570 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1571 fpu_vfp_ext_v3);
1572 else
1573 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1574 fpu_vfp_ext_v3);
1575 }
1576 else
1577 max_regs = 16;
1578 }
1579
1580 base_reg = max_regs;
1581
1582 do
1583 {
1584 int setmask = 1, addregs = 1;
1585
1586 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1587
1588 if (new_base == FAIL)
1589 {
1590 first_error (_(reg_expected_msgs[regtype]));
1591 return FAIL;
1592 }
1593
1594 if (new_base >= max_regs)
1595 {
1596 first_error (_("register out of range in list"));
1597 return FAIL;
1598 }
1599
1600 /* Note: a value of 2 * n is returned for the register Q<n>. */
1601 if (regtype == REG_TYPE_NQ)
1602 {
1603 setmask = 3;
1604 addregs = 2;
1605 }
1606
1607 if (new_base < base_reg)
1608 base_reg = new_base;
1609
1610 if (mask & (setmask << new_base))
1611 {
1612 first_error (_("invalid register list"));
1613 return FAIL;
1614 }
1615
1616 if ((mask >> new_base) != 0 && ! warned)
1617 {
1618 as_tsktsk (_("register list not in ascending order"));
1619 warned = 1;
1620 }
1621
1622 mask |= setmask << new_base;
1623 count += addregs;
1624
1625 if (**str == '-') /* We have the start of a range expression */
1626 {
1627 int high_range;
1628
1629 (*str)++;
1630
1631 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1632 == FAIL)
1633 {
1634 inst.error = gettext (reg_expected_msgs[regtype]);
1635 return FAIL;
1636 }
1637
1638 if (high_range >= max_regs)
1639 {
1640 first_error (_("register out of range in list"));
1641 return FAIL;
1642 }
1643
1644 if (regtype == REG_TYPE_NQ)
1645 high_range = high_range + 1;
1646
1647 if (high_range <= new_base)
1648 {
1649 inst.error = _("register range not in ascending order");
1650 return FAIL;
1651 }
1652
1653 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1654 {
1655 if (mask & (setmask << new_base))
1656 {
1657 inst.error = _("invalid register list");
1658 return FAIL;
1659 }
1660
1661 mask |= setmask << new_base;
1662 count += addregs;
1663 }
1664 }
1665 }
1666 while (skip_past_comma (str) != FAIL);
1667
1668 (*str)++;
1669
1670 /* Sanity check -- should have raised a parse error above. */
1671 if (count == 0 || count > max_regs)
1672 abort ();
1673
1674 *pbase = base_reg;
1675
1676 /* Final test -- the registers must be consecutive. */
1677 mask >>= base_reg;
1678 for (i = 0; i < count; i++)
1679 {
1680 if ((mask & (1u << i)) == 0)
1681 {
1682 inst.error = _("non-contiguous register range");
1683 return FAIL;
1684 }
1685 }
1686
1687 return count;
1688 }
1689
1690 /* True if two alias types are the same. */
1691
1692 static int
1693 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1694 {
1695 if (!a && !b)
1696 return 1;
1697
1698 if (!a || !b)
1699 return 0;
1700
1701 if (a->defined != b->defined)
1702 return 0;
1703
1704 if ((a->defined & NTA_HASTYPE) != 0
1705 && (a->eltype.type != b->eltype.type
1706 || a->eltype.size != b->eltype.size))
1707 return 0;
1708
1709 if ((a->defined & NTA_HASINDEX) != 0
1710 && (a->index != b->index))
1711 return 0;
1712
1713 return 1;
1714 }
1715
1716 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1717 The base register is put in *PBASE.
1718 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1719 the return value.
1720 The register stride (minus one) is put in bit 4 of the return value.
1721 Bits [6:5] encode the list length (minus one).
1722 The type of the list elements is put in *ELTYPE, if non-NULL. */
1723
1724 #define NEON_LANE(X) ((X) & 0xf)
1725 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1726 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
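/* Worked example of the encoding above: the list {d0[], d2[]} (two
   registers, stride 2, all lanes) is returned as
   NEON_ALL_LANES | ((2 - 1) << 4) | ((2 - 1) << 5), i.e. 0x3f. */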
1727
1728 static int
1729 parse_neon_el_struct_list (char **str, unsigned *pbase,
1730 struct neon_type_el *eltype)
1731 {
1732 char *ptr = *str;
1733 int base_reg = -1;
1734 int reg_incr = -1;
1735 int count = 0;
1736 int lane = -1;
1737 int leading_brace = 0;
1738 enum arm_reg_type rtype = REG_TYPE_NDQ;
1739 int addregs = 1;
1740 const char *const incr_error = "register stride must be 1 or 2";
1741 const char *const type_error = "mismatched element/structure types in list";
1742 struct neon_typed_alias firsttype;
1743
1744 if (skip_past_char (&ptr, '{') == SUCCESS)
1745 leading_brace = 1;
1746
1747 do
1748 {
1749 struct neon_typed_alias atype;
1750 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1751
1752 if (getreg == FAIL)
1753 {
1754 first_error (_(reg_expected_msgs[rtype]));
1755 return FAIL;
1756 }
1757
1758 if (base_reg == -1)
1759 {
1760 base_reg = getreg;
1761 if (rtype == REG_TYPE_NQ)
1762 {
1763 reg_incr = 1;
1764 addregs = 2;
1765 }
1766 firsttype = atype;
1767 }
1768 else if (reg_incr == -1)
1769 {
1770 reg_incr = getreg - base_reg;
1771 if (reg_incr < 1 || reg_incr > 2)
1772 {
1773 first_error (_(incr_error));
1774 return FAIL;
1775 }
1776 }
1777 else if (getreg != base_reg + reg_incr * count)
1778 {
1779 first_error (_(incr_error));
1780 return FAIL;
1781 }
1782
1783 if (!neon_alias_types_same (&atype, &firsttype))
1784 {
1785 first_error (_(type_error));
1786 return FAIL;
1787 }
1788
1789 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1790 modes. */
1791 if (ptr[0] == '-')
1792 {
1793 struct neon_typed_alias htype;
1794 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1795 if (lane == -1)
1796 lane = NEON_INTERLEAVE_LANES;
1797 else if (lane != NEON_INTERLEAVE_LANES)
1798 {
1799 first_error (_(type_error));
1800 return FAIL;
1801 }
1802 if (reg_incr == -1)
1803 reg_incr = 1;
1804 else if (reg_incr != 1)
1805 {
1806 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1807 return FAIL;
1808 }
1809 ptr++;
1810 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1811 if (hireg == FAIL)
1812 {
1813 first_error (_(reg_expected_msgs[rtype]));
1814 return FAIL;
1815 }
1816 if (!neon_alias_types_same (&htype, &firsttype))
1817 {
1818 first_error (_(type_error));
1819 return FAIL;
1820 }
1821 count += hireg + dregs - getreg;
1822 continue;
1823 }
1824
1825 /* If we're using Q registers, we can't use [] or [n] syntax. */
1826 if (rtype == REG_TYPE_NQ)
1827 {
1828 count += 2;
1829 continue;
1830 }
1831
1832 if ((atype.defined & NTA_HASINDEX) != 0)
1833 {
1834 if (lane == -1)
1835 lane = atype.index;
1836 else if (lane != atype.index)
1837 {
1838 first_error (_(type_error));
1839 return FAIL;
1840 }
1841 }
1842 else if (lane == -1)
1843 lane = NEON_INTERLEAVE_LANES;
1844 else if (lane != NEON_INTERLEAVE_LANES)
1845 {
1846 first_error (_(type_error));
1847 return FAIL;
1848 }
1849 count++;
1850 }
1851 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1852
1853 /* No lane set by [x]. We must be interleaving structures. */
1854 if (lane == -1)
1855 lane = NEON_INTERLEAVE_LANES;
1856
1857 /* Sanity check. */
1858 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1859 || (count > 1 && reg_incr == -1))
1860 {
1861 first_error (_("error parsing element/structure list"));
1862 return FAIL;
1863 }
1864
1865 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1866 {
1867 first_error (_("expected }"));
1868 return FAIL;
1869 }
1870
1871 if (reg_incr == -1)
1872 reg_incr = 1;
1873
1874 if (eltype)
1875 *eltype = firsttype.eltype;
1876
1877 *pbase = base_reg;
1878 *str = ptr;
1879
1880 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1881 }
1882
1883 /* Parse an explicit relocation suffix on an expression. This is
1884 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1885 arm_reloc_hsh contains no entries, so this function can only
1886 succeed if there is no () after the word. Returns -1 on error,
1887 BFD_RELOC_UNUSED if there wasn't any suffix. */
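/* For example, the "(GOT)" suffix in ".word sym(GOT)" is looked up in
   arm_reloc_hsh and the matching BFD relocation code is returned. */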
1888 static int
1889 parse_reloc (char **str)
1890 {
1891 struct reloc_entry *r;
1892 char *p, *q;
1893
1894 if (**str != '(')
1895 return BFD_RELOC_UNUSED;
1896
1897 p = *str + 1;
1898 q = p;
1899
1900 while (*q && *q != ')' && *q != ',')
1901 q++;
1902 if (*q != ')')
1903 return -1;
1904
1905 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1906 return -1;
1907
1908 *str = q + 1;
1909 return r->reloc;
1910 }
1911
1912 /* Directives: register aliases. */
1913
1914 static struct reg_entry *
1915 insert_reg_alias (char *str, int number, int type)
1916 {
1917 struct reg_entry *new;
1918 const char *name;
1919
1920 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1921 {
1922 if (new->builtin)
1923 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1924
1925 /* Only warn about a redefinition if it's not defined as the
1926 same register. */
1927 else if (new->number != number || new->type != type)
1928 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1929
1930 return 0;
1931 }
1932
1933 name = xstrdup (str);
1934 new = xmalloc (sizeof (struct reg_entry));
1935
1936 new->name = name;
1937 new->number = number;
1938 new->type = type;
1939 new->builtin = FALSE;
1940 new->neon = NULL;
1941
1942 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1943 abort ();
1944
1945 return new;
1946 }
1947
1948 static void
1949 insert_neon_reg_alias (char *str, int number, int type,
1950 struct neon_typed_alias *atype)
1951 {
1952 struct reg_entry *reg = insert_reg_alias (str, number, type);
1953
1954 if (!reg)
1955 {
1956 first_error (_("attempt to redefine typed alias"));
1957 return;
1958 }
1959
1960 if (atype)
1961 {
1962 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1963 *reg->neon = *atype;
1964 }
1965 }
1966
1967 /* Look for the .req directive. This is of the form:
1968
1969 new_register_name .req existing_register_name
1970
1971 If we find one, or if it looks sufficiently like one that we want to
1972 handle any error here, return non-zero. Otherwise return zero. */
1973
1974 static int
1975 create_register_alias (char * newname, char *p)
1976 {
1977 struct reg_entry *old;
1978 char *oldname, *nbuf;
1979 size_t nlen;
1980
1981 /* The input scrubber ensures that whitespace after the mnemonic is
1982 collapsed to single spaces. */
1983 oldname = p;
1984 if (strncmp (oldname, " .req ", 6) != 0)
1985 return 0;
1986
1987 oldname += 6;
1988 if (*oldname == '\0')
1989 return 0;
1990
1991 old = hash_find (arm_reg_hsh, oldname);
1992 if (!old)
1993 {
1994 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1995 return 1;
1996 }
1997
1998 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1999 the desired alias name, and p points to its end. If not, then
2000 the desired alias name is in the global original_case_string. */
2001 #ifdef TC_CASE_SENSITIVE
2002 nlen = p - newname;
2003 #else
2004 newname = original_case_string;
2005 nlen = strlen (newname);
2006 #endif
2007
2008 nbuf = alloca (nlen + 1);
2009 memcpy (nbuf, newname, nlen);
2010 nbuf[nlen] = '\0';
2011
2012 /* Create aliases under the new name as stated; an all-lowercase
2013 version of the new name; and an all-uppercase version of the new
2014 name. */
2015 insert_reg_alias (nbuf, old->number, old->type);
2016
2017 for (p = nbuf; *p; p++)
2018 *p = TOUPPER (*p);
2019
2020 if (strncmp (nbuf, newname, nlen))
2021 insert_reg_alias (nbuf, old->number, old->type);
2022
2023 for (p = nbuf; *p; p++)
2024 *p = TOLOWER (*p);
2025
2026 if (strncmp (nbuf, newname, nlen))
2027 insert_reg_alias (nbuf, old->number, old->type);
2028
2029 return 1;
2030 }
2031
2032 /* Create a Neon typed/indexed register alias using directives, e.g.:
2033 X .dn d5.s32[1]
2034 Y .qn 6.s16
2035 Z .dn d7
2036 T .dn Z[0]
2037 These typed registers can be used instead of the types specified after the
2038 Neon mnemonic, so long as all operands given have types. Types can also be
2039 specified directly, e.g.:
2040 vadd d0.s32, d1.s32, d2.s32
2041 */
2042
2043 static int
2044 create_neon_reg_alias (char *newname, char *p)
2045 {
2046 enum arm_reg_type basetype;
2047 struct reg_entry *basereg;
2048 struct reg_entry mybasereg;
2049 struct neon_type ntype;
2050 struct neon_typed_alias typeinfo;
2051 char *namebuf, *nameend;
2052 int namelen;
2053
2054 typeinfo.defined = 0;
2055 typeinfo.eltype.type = NT_invtype;
2056 typeinfo.eltype.size = -1;
2057 typeinfo.index = -1;
2058
2059 nameend = p;
2060
2061 if (strncmp (p, " .dn ", 5) == 0)
2062 basetype = REG_TYPE_VFD;
2063 else if (strncmp (p, " .qn ", 5) == 0)
2064 basetype = REG_TYPE_NQ;
2065 else
2066 return 0;
2067
2068 p += 5;
2069
2070 if (*p == '\0')
2071 return 0;
2072
2073 basereg = arm_reg_parse_multi (&p);
2074
2075 if (basereg && basereg->type != basetype)
2076 {
2077 as_bad (_("bad type for register"));
2078 return 0;
2079 }
2080
2081 if (basereg == NULL)
2082 {
2083 expressionS exp;
2084 /* Try parsing as an integer. */
2085 my_get_expression (&exp, &p, GE_NO_PREFIX);
2086 if (exp.X_op != O_constant)
2087 {
2088 as_bad (_("expression must be constant"));
2089 return 0;
2090 }
2091 basereg = &mybasereg;
2092 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2093 : exp.X_add_number;
2094 basereg->neon = 0;
2095 }
2096
2097 if (basereg->neon)
2098 typeinfo = *basereg->neon;
2099
2100 if (parse_neon_type (&ntype, &p) == SUCCESS)
2101 {
2102 /* We got a type. */
2103 if (typeinfo.defined & NTA_HASTYPE)
2104 {
2105 as_bad (_("can't redefine the type of a register alias"));
2106 return 0;
2107 }
2108
2109 typeinfo.defined |= NTA_HASTYPE;
2110 if (ntype.elems != 1)
2111 {
2112 as_bad (_("you must specify a single type only"));
2113 return 0;
2114 }
2115 typeinfo.eltype = ntype.el[0];
2116 }
2117
2118 if (skip_past_char (&p, '[') == SUCCESS)
2119 {
2120 expressionS exp;
2121 /* We got a scalar index. */
2122
2123 if (typeinfo.defined & NTA_HASINDEX)
2124 {
2125 as_bad (_("can't redefine the index of a scalar alias"));
2126 return 0;
2127 }
2128
2129 my_get_expression (&exp, &p, GE_NO_PREFIX);
2130
2131 if (exp.X_op != O_constant)
2132 {
2133 as_bad (_("scalar index must be constant"));
2134 return 0;
2135 }
2136
2137 typeinfo.defined |= NTA_HASINDEX;
2138 typeinfo.index = exp.X_add_number;
2139
2140 if (skip_past_char (&p, ']') == FAIL)
2141 {
2142 as_bad (_("expecting ]"));
2143 return 0;
2144 }
2145 }
2146
2147 namelen = nameend - newname;
2148 namebuf = alloca (namelen + 1);
2149 strncpy (namebuf, newname, namelen);
2150 namebuf[namelen] = '\0';
2151
2152 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2153 typeinfo.defined != 0 ? &typeinfo : NULL);
2154
2155 /* Insert name in all uppercase. */
2156 for (p = namebuf; *p; p++)
2157 *p = TOUPPER (*p);
2158
2159 if (strncmp (namebuf, newname, namelen))
2160 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2161 typeinfo.defined != 0 ? &typeinfo : NULL);
2162
2163 /* Insert name in all lowercase. */
2164 for (p = namebuf; *p; p++)
2165 *p = TOLOWER (*p);
2166
2167 if (strncmp (namebuf, newname, namelen))
2168 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2169 typeinfo.defined != 0 ? &typeinfo : NULL);
2170
2171 return 1;
2172 }
2173
2174 /* Should never be called, as .req goes between the alias and the
2175 register name, not at the beginning of the line. */
2176 static void
2177 s_req (int a ATTRIBUTE_UNUSED)
2178 {
2179 as_bad (_("invalid syntax for .req directive"));
2180 }
2181
2182 static void
2183 s_dn (int a ATTRIBUTE_UNUSED)
2184 {
2185 as_bad (_("invalid syntax for .dn directive"));
2186 }
2187
2188 static void
2189 s_qn (int a ATTRIBUTE_UNUSED)
2190 {
2191 as_bad (_("invalid syntax for .qn directive"));
2192 }
2193
2194 /* The .unreq directive deletes an alias which was previously defined
2195 by .req. For example:
2196
2197 my_alias .req r11
2198 .unreq my_alias */
2199
2200 static void
2201 s_unreq (int a ATTRIBUTE_UNUSED)
2202 {
2203 char * name;
2204 char saved_char;
2205
2206 name = input_line_pointer;
2207
2208 while (*input_line_pointer != 0
2209 && *input_line_pointer != ' '
2210 && *input_line_pointer != '\n')
2211 ++input_line_pointer;
2212
2213 saved_char = *input_line_pointer;
2214 *input_line_pointer = 0;
2215
2216 if (!*name)
2217 as_bad (_("invalid syntax for .unreq directive"));
2218 else
2219 {
2220 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2221
2222 if (!reg)
2223 as_bad (_("unknown register alias '%s'"), name);
2224 else if (reg->builtin)
2225 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2226 name);
2227 else
2228 {
2229 hash_delete (arm_reg_hsh, name);
2230 free ((char *) reg->name);
2231 if (reg->neon)
2232 free (reg->neon);
2233 free (reg);
2234 }
2235 }
2236
2237 *input_line_pointer = saved_char;
2238 demand_empty_rest_of_line ();
2239 }
2240
2241 /* Directives: Instruction set selection. */
2242
2243 #ifdef OBJ_ELF
2244 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2245 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2246 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2247 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
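/* As a rough sketch of the effect, assembling

       .text
       add   r0, r0, r1       @ ARM instruction
       .word 0x12345678       @ inline data

   causes a "$a" mapping symbol to be emitted at the ADD and a "$d"
   symbol at the .word, so that disassemblers and the linker can tell
   code from data.  */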
2248
2249 static enum mstate mapstate = MAP_UNDEFINED;
2250
2251 static void
2252 mapping_state (enum mstate state)
2253 {
2254 symbolS * symbolP;
2255 const char * symname;
2256 int type;
2257
2258 if (mapstate == state)
2259 /* The mapping symbol has already been emitted.
2260 There is nothing else to do. */
2261 return;
2262
2263 mapstate = state;
2264
2265 switch (state)
2266 {
2267 case MAP_DATA:
2268 symname = "$d";
2269 type = BSF_NO_FLAGS;
2270 break;
2271 case MAP_ARM:
2272 symname = "$a";
2273 type = BSF_NO_FLAGS;
2274 break;
2275 case MAP_THUMB:
2276 symname = "$t";
2277 type = BSF_NO_FLAGS;
2278 break;
2279 case MAP_UNDEFINED:
2280 return;
2281 default:
2282 abort ();
2283 }
2284
2285 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2286
2287 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2288 symbol_table_insert (symbolP);
2289 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2290
2291 switch (state)
2292 {
2293 case MAP_ARM:
2294 THUMB_SET_FUNC (symbolP, 0);
2295 ARM_SET_THUMB (symbolP, 0);
2296 ARM_SET_INTERWORK (symbolP, support_interwork);
2297 break;
2298
2299 case MAP_THUMB:
2300 THUMB_SET_FUNC (symbolP, 1);
2301 ARM_SET_THUMB (symbolP, 1);
2302 ARM_SET_INTERWORK (symbolP, support_interwork);
2303 break;
2304
2305 case MAP_DATA:
2306 default:
2307 return;
2308 }
2309 }
2310 #else
2311 #define mapping_state(x) /* nothing */
2312 #endif
2313
2314 /* Find the real, Thumb encoded start of a Thumb function. */
2315
2316 static symbolS *
2317 find_real_start (symbolS * symbolP)
2318 {
2319 char * real_start;
2320 const char * name = S_GET_NAME (symbolP);
2321 symbolS * new_target;
2322
2323 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2324 #define STUB_NAME ".real_start_of"
2325
2326 if (name == NULL)
2327 abort ();
2328
2329 /* The compiler may generate BL instructions to local labels because
2330 it needs to perform a branch to a far away location. These labels
2331 do not have a corresponding ".real_start_of" label. We check
2332 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2333 the ".real_start_of" convention for nonlocal branches. */
2334 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2335 return symbolP;
2336
2337 real_start = ACONCAT ((STUB_NAME, name, NULL));
2338 new_target = symbol_find (real_start);
2339
2340 if (new_target == NULL)
2341 {
2342 as_warn ("Failed to find real start of function: %s\n", name);
2343 new_target = symbolP;
2344 }
2345
2346 return new_target;
2347 }
2348
2349 static void
2350 opcode_select (int width)
2351 {
2352 switch (width)
2353 {
2354 case 16:
2355 if (! thumb_mode)
2356 {
2357 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2358 as_bad (_("selected processor does not support THUMB opcodes"));
2359
2360 thumb_mode = 1;
2361 /* No need to force the alignment, since we will have been
2362 coming from ARM mode, which is word-aligned. */
2363 record_alignment (now_seg, 1);
2364 }
2365 mapping_state (MAP_THUMB);
2366 break;
2367
2368 case 32:
2369 if (thumb_mode)
2370 {
2371 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2372 as_bad (_("selected processor does not support ARM opcodes"));
2373
2374 thumb_mode = 0;
2375
2376 if (!need_pass_2)
2377 frag_align (2, 0, 0);
2378
2379 record_alignment (now_seg, 1);
2380 }
2381 mapping_state (MAP_ARM);
2382 break;
2383
2384 default:
2385 as_bad (_("invalid instruction size selected (%d)"), width);
2386 }
2387 }
2388
2389 static void
2390 s_arm (int ignore ATTRIBUTE_UNUSED)
2391 {
2392 opcode_select (32);
2393 demand_empty_rest_of_line ();
2394 }
2395
2396 static void
2397 s_thumb (int ignore ATTRIBUTE_UNUSED)
2398 {
2399 opcode_select (16);
2400 demand_empty_rest_of_line ();
2401 }
2402
2403 static void
2404 s_code (int unused ATTRIBUTE_UNUSED)
2405 {
2406 int temp;
2407
2408 temp = get_absolute_expression ();
2409 switch (temp)
2410 {
2411 case 16:
2412 case 32:
2413 opcode_select (temp);
2414 break;
2415
2416 default:
2417 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2418 }
2419 }
2420
2421 static void
2422 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2423 {
2424 /* If we are not already in Thumb mode, go into it, EVEN if
2425 the target processor does not support Thumb instructions.
2426 This is used by gcc/config/arm/lib1funcs.asm, for example,
2427 to compile interworking support functions even if the
2428 target processor does not support interworking. */
2429 if (! thumb_mode)
2430 {
2431 thumb_mode = 2;
2432 record_alignment (now_seg, 1);
2433 }
2434
2435 demand_empty_rest_of_line ();
2436 }
2437
2438 static void
2439 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2440 {
2441 s_thumb (0);
2442
2443 /* The following label is the name/address of the start of a Thumb function.
2444 We need to know this for the interworking support. */
2445 label_is_thumb_function_name = TRUE;
2446 }
2447
2448 /* Perform a .set directive, but also mark the alias as
2449 being a thumb function. */
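/* For example:

       .thumb_set foo, bar

   behaves like ".set foo, bar" but additionally marks foo as a Thumb
   function for interworking purposes.  */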
2450
2451 static void
2452 s_thumb_set (int equiv)
2453 {
2454 /* XXX the following is a duplicate of the code for s_set() in read.c.
2455 We cannot just call that code, as we need to get at the symbol that
2456 is created. */
2457 char * name;
2458 char delim;
2459 char * end_name;
2460 symbolS * symbolP;
2461
2462 /* Especial apologies for the random logic:
2463 This just grew, and could be parsed much more simply!
2464 Dean - in haste. */
2465 name = input_line_pointer;
2466 delim = get_symbol_end ();
2467 end_name = input_line_pointer;
2468 *end_name = delim;
2469
2470 if (*input_line_pointer != ',')
2471 {
2472 *end_name = 0;
2473 as_bad (_("expected comma after name \"%s\""), name);
2474 *end_name = delim;
2475 ignore_rest_of_line ();
2476 return;
2477 }
2478
2479 input_line_pointer++;
2480 *end_name = 0;
2481
2482 if (name[0] == '.' && name[1] == '\0')
2483 {
2484 /* XXX - this should not happen to .thumb_set. */
2485 abort ();
2486 }
2487
2488 if ((symbolP = symbol_find (name)) == NULL
2489 && (symbolP = md_undefined_symbol (name)) == NULL)
2490 {
2491 #ifndef NO_LISTING
2492 /* When doing symbol listings, play games with dummy fragments living
2493 outside the normal fragment chain to record the file and line info
2494 for this symbol. */
2495 if (listing & LISTING_SYMBOLS)
2496 {
2497 extern struct list_info_struct * listing_tail;
2498 fragS * dummy_frag = xmalloc (sizeof (fragS));
2499
2500 memset (dummy_frag, 0, sizeof (fragS));
2501 dummy_frag->fr_type = rs_fill;
2502 dummy_frag->line = listing_tail;
2503 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2504 dummy_frag->fr_symbol = symbolP;
2505 }
2506 else
2507 #endif
2508 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2509
2510 #ifdef OBJ_COFF
2511 /* "set" symbols are local unless otherwise specified. */
2512 SF_SET_LOCAL (symbolP);
2513 #endif /* OBJ_COFF */
2514 } /* Make a new symbol. */
2515
2516 symbol_table_insert (symbolP);
2517
2518 * end_name = delim;
2519
2520 if (equiv
2521 && S_IS_DEFINED (symbolP)
2522 && S_GET_SEGMENT (symbolP) != reg_section)
2523 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2524
2525 pseudo_set (symbolP);
2526
2527 demand_empty_rest_of_line ();
2528
2529 /* XXX Now we come to the Thumb specific bit of code. */
2530
2531 THUMB_SET_FUNC (symbolP, 1);
2532 ARM_SET_THUMB (symbolP, 1);
2533 #if defined OBJ_ELF || defined OBJ_COFF
2534 ARM_SET_INTERWORK (symbolP, support_interwork);
2535 #endif
2536 }
2537
2538 /* Directives: Mode selection. */
2539
2540 /* .syntax [unified|divided] - choose the new unified syntax
2541 (same for ARM and Thumb encoding, modulo slight differences in what
2542 can be represented) or the old divergent syntax for each mode. */
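/* For example, ".syntax unified" selects the unified (UAL) syntax for
   the rest of the input, and ".syntax divided" switches back to the
   traditional per-mode syntax.  */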
2543 static void
2544 s_syntax (int unused ATTRIBUTE_UNUSED)
2545 {
2546 char *name, delim;
2547
2548 name = input_line_pointer;
2549 delim = get_symbol_end ();
2550
2551 if (!strcasecmp (name, "unified"))
2552 unified_syntax = TRUE;
2553 else if (!strcasecmp (name, "divided"))
2554 unified_syntax = FALSE;
2555 else
2556 {
2557 as_bad (_("unrecognized syntax mode \"%s\""), name);
2558 return;
2559 }
2560 *input_line_pointer = delim;
2561 demand_empty_rest_of_line ();
2562 }
2563
2564 /* Directives: sectioning and alignment. */
2565
2566 /* Same as s_align_ptwo but align 0 => align 2. */
2567
2568 static void
2569 s_align (int unused ATTRIBUTE_UNUSED)
2570 {
2571 int temp;
2572 long temp_fill;
2573 long max_alignment = 15;
2574
2575 temp = get_absolute_expression ();
2576 if (temp > max_alignment)
2577 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2578 else if (temp < 0)
2579 {
2580 as_bad (_("alignment negative. 0 assumed."));
2581 temp = 0;
2582 }
2583
2584 if (*input_line_pointer == ',')
2585 {
2586 input_line_pointer++;
2587 temp_fill = get_absolute_expression ();
2588 }
2589 else
2590 temp_fill = 0;
2591
2592 if (!temp)
2593 temp = 2;
2594
2595 /* Only make a frag if we HAVE to. */
2596 if (temp && !need_pass_2)
2597 frag_align (temp, (int) temp_fill, 0);
2598 demand_empty_rest_of_line ();
2599
2600 record_alignment (now_seg, temp);
2601 }
2602
2603 static void
2604 s_bss (int ignore ATTRIBUTE_UNUSED)
2605 {
2606 /* We don't support putting frags in the BSS segment; we fake it by
2607 marking in_bss, then looking at s_skip for clues. */
2608 subseg_set (bss_section, 0);
2609 demand_empty_rest_of_line ();
2610 mapping_state (MAP_DATA);
2611 }
2612
2613 static void
2614 s_even (int ignore ATTRIBUTE_UNUSED)
2615 {
2616 /* Never make a frag if we expect an extra pass. */
2617 if (!need_pass_2)
2618 frag_align (1, 0, 0);
2619
2620 record_alignment (now_seg, 1);
2621
2622 demand_empty_rest_of_line ();
2623 }
2624
2625 /* Directives: Literal pools. */
2626
2627 static literal_pool *
2628 find_literal_pool (void)
2629 {
2630 literal_pool * pool;
2631
2632 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2633 {
2634 if (pool->section == now_seg
2635 && pool->sub_section == now_subseg)
2636 break;
2637 }
2638
2639 return pool;
2640 }
2641
2642 static literal_pool *
2643 find_or_make_literal_pool (void)
2644 {
2645 /* Next literal pool ID number. */
2646 static unsigned int latest_pool_num = 1;
2647 literal_pool * pool;
2648
2649 pool = find_literal_pool ();
2650
2651 if (pool == NULL)
2652 {
2653 /* Create a new pool. */
2654 pool = xmalloc (sizeof (* pool));
2655 if (! pool)
2656 return NULL;
2657
2658 pool->next_free_entry = 0;
2659 pool->section = now_seg;
2660 pool->sub_section = now_subseg;
2661 pool->next = list_of_pools;
2662 pool->symbol = NULL;
2663
2664 /* Add it to the list. */
2665 list_of_pools = pool;
2666 }
2667
2668 /* New pools, and emptied pools, will have a NULL symbol. */
2669 if (pool->symbol == NULL)
2670 {
2671 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2672 (valueT) 0, &zero_address_frag);
2673 pool->id = latest_pool_num ++;
2674 }
2675
2676 /* Done. */
2677 return pool;
2678 }
2679
2680 /* Add the literal in the global 'inst'
2681 structure to the relevant literal pool. */
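/* For illustration (register and constant below are arbitrary), this
   is the machinery behind the load-constant pseudo op:

       ldr   r0, =0xdeadbeef   @ constant placed in the literal pool
       ...
       .ltorg                  @ pool emitted here by s_ltorg

   The LDR is then fixed up as a PC-relative load from the pool entry.  */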
2682
2683 static int
2684 add_to_lit_pool (void)
2685 {
2686 literal_pool * pool;
2687 unsigned int entry;
2688
2689 pool = find_or_make_literal_pool ();
2690
2691 /* Check if this literal value is already in the pool. */
2692 for (entry = 0; entry < pool->next_free_entry; entry ++)
2693 {
2694 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2695 && (inst.reloc.exp.X_op == O_constant)
2696 && (pool->literals[entry].X_add_number
2697 == inst.reloc.exp.X_add_number)
2698 && (pool->literals[entry].X_unsigned
2699 == inst.reloc.exp.X_unsigned))
2700 break;
2701
2702 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2703 && (inst.reloc.exp.X_op == O_symbol)
2704 && (pool->literals[entry].X_add_number
2705 == inst.reloc.exp.X_add_number)
2706 && (pool->literals[entry].X_add_symbol
2707 == inst.reloc.exp.X_add_symbol)
2708 && (pool->literals[entry].X_op_symbol
2709 == inst.reloc.exp.X_op_symbol))
2710 break;
2711 }
2712
2713 /* Do we need to create a new entry? */
2714 if (entry == pool->next_free_entry)
2715 {
2716 if (entry >= MAX_LITERAL_POOL_SIZE)
2717 {
2718 inst.error = _("literal pool overflow");
2719 return FAIL;
2720 }
2721
2722 pool->literals[entry] = inst.reloc.exp;
2723 pool->next_free_entry += 1;
2724 }
2725
2726 inst.reloc.exp.X_op = O_symbol;
2727 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2728 inst.reloc.exp.X_add_symbol = pool->symbol;
2729
2730 return SUCCESS;
2731 }
2732
2733 /* Can't use symbol_new here, so have to create a symbol and then at
2734 a later date assign it a value. That's what these functions do. */
2735
2736 static void
2737 symbol_locate (symbolS * symbolP,
2738 const char * name, /* It is copied, the caller can modify. */
2739 segT segment, /* Segment identifier (SEG_<something>). */
2740 valueT valu, /* Symbol value. */
2741 fragS * frag) /* Associated fragment. */
2742 {
2743 unsigned int name_length;
2744 char * preserved_copy_of_name;
2745
2746 name_length = strlen (name) + 1; /* +1 for \0. */
2747 obstack_grow (&notes, name, name_length);
2748 preserved_copy_of_name = obstack_finish (&notes);
2749
2750 #ifdef tc_canonicalize_symbol_name
2751 preserved_copy_of_name =
2752 tc_canonicalize_symbol_name (preserved_copy_of_name);
2753 #endif
2754
2755 S_SET_NAME (symbolP, preserved_copy_of_name);
2756
2757 S_SET_SEGMENT (symbolP, segment);
2758 S_SET_VALUE (symbolP, valu);
2759 symbol_clear_list_pointers (symbolP);
2760
2761 symbol_set_frag (symbolP, frag);
2762
2763 /* Link to end of symbol chain. */
2764 {
2765 extern int symbol_table_frozen;
2766
2767 if (symbol_table_frozen)
2768 abort ();
2769 }
2770
2771 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2772
2773 obj_symbol_new_hook (symbolP);
2774
2775 #ifdef tc_symbol_new_hook
2776 tc_symbol_new_hook (symbolP);
2777 #endif
2778
2779 #ifdef DEBUG_SYMS
2780 verify_symbol_chain (symbol_rootP, symbol_lastP);
2781 #endif /* DEBUG_SYMS */
2782 }
2783
2784
2785 static void
2786 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2787 {
2788 unsigned int entry;
2789 literal_pool * pool;
2790 char sym_name[20];
2791
2792 pool = find_literal_pool ();
2793 if (pool == NULL
2794 || pool->symbol == NULL
2795 || pool->next_free_entry == 0)
2796 return;
2797
2798 mapping_state (MAP_DATA);
2799
2800 /* Align the pool for word accesses.
2801 Only make a frag if we have to. */
2802 if (!need_pass_2)
2803 frag_align (2, 0, 0);
2804
2805 record_alignment (now_seg, 2);
2806
2807 sprintf (sym_name, "$$lit_\002%x", pool->id);
2808
2809 symbol_locate (pool->symbol, sym_name, now_seg,
2810 (valueT) frag_now_fix (), frag_now);
2811 symbol_table_insert (pool->symbol);
2812
2813 ARM_SET_THUMB (pool->symbol, thumb_mode);
2814
2815 #if defined OBJ_COFF || defined OBJ_ELF
2816 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2817 #endif
2818
2819 for (entry = 0; entry < pool->next_free_entry; entry ++)
2820 /* First output the expression in the instruction to the pool. */
2821 emit_expr (&(pool->literals[entry]), 4); /* .word */
2822
2823 /* Mark the pool as empty. */
2824 pool->next_free_entry = 0;
2825 pool->symbol = NULL;
2826 }
2827
2828 #ifdef OBJ_ELF
2829 /* Forward declarations for functions below, in the MD interface
2830 section. */
2831 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2832 static valueT create_unwind_entry (int);
2833 static void start_unwind_section (const segT, int);
2834 static void add_unwind_opcode (valueT, int);
2835 static void flush_pending_unwind (void);
2836
2837 /* Directives: Data. */
2838
2839 static void
2840 s_arm_elf_cons (int nbytes)
2841 {
2842 expressionS exp;
2843
2844 #ifdef md_flush_pending_output
2845 md_flush_pending_output ();
2846 #endif
2847
2848 if (is_it_end_of_statement ())
2849 {
2850 demand_empty_rest_of_line ();
2851 return;
2852 }
2853
2854 #ifdef md_cons_align
2855 md_cons_align (nbytes);
2856 #endif
2857
2858 mapping_state (MAP_DATA);
2859 do
2860 {
2861 int reloc;
2862 char *base = input_line_pointer;
2863
2864 expression (& exp);
2865
2866 if (exp.X_op != O_symbol)
2867 emit_expr (&exp, (unsigned int) nbytes);
2868 else
2869 {
2870 char *before_reloc = input_line_pointer;
2871 reloc = parse_reloc (&input_line_pointer);
2872 if (reloc == -1)
2873 {
2874 as_bad (_("unrecognized relocation suffix"));
2875 ignore_rest_of_line ();
2876 return;
2877 }
2878 else if (reloc == BFD_RELOC_UNUSED)
2879 emit_expr (&exp, (unsigned int) nbytes);
2880 else
2881 {
2882 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2883 int size = bfd_get_reloc_size (howto);
2884
2885 if (reloc == BFD_RELOC_ARM_PLT32)
2886 {
2887 as_bad (_("(plt) is only valid on branch targets"));
2888 reloc = BFD_RELOC_UNUSED;
2889 size = 0;
2890 }
2891
2892 if (size > nbytes)
2893 as_bad (_("%s relocations do not fit in %d bytes"),
2894 howto->name, nbytes);
2895 else
2896 {
2897 /* We've parsed an expression stopping at O_symbol.
2898 But there may be more expression left now that we
2899 have parsed the relocation marker. Parse it again.
2900 XXX Surely there is a cleaner way to do this. */
2901 char *p = input_line_pointer;
2902 int offset;
2903 char *save_buf = alloca (input_line_pointer - base);
2904 memcpy (save_buf, base, input_line_pointer - base);
2905 memmove (base + (input_line_pointer - before_reloc),
2906 base, before_reloc - base);
2907
2908 input_line_pointer = base + (input_line_pointer-before_reloc);
2909 expression (&exp);
2910 memcpy (base, save_buf, p - base);
2911
2912 offset = nbytes - size;
2913 p = frag_more ((int) nbytes);
2914 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2915 size, &exp, 0, reloc);
2916 }
2917 }
2918 }
2919 }
2920 while (*input_line_pointer++ == ',');
2921
2922 /* Put terminator back into stream. */
2923 input_line_pointer --;
2924 demand_empty_rest_of_line ();
2925 }
2926
2927
2928 /* Parse a .rel31 directive. */
2929
2930 static void
2931 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2932 {
2933 expressionS exp;
2934 char *p;
2935 valueT highbit;
2936
2937 highbit = 0;
2938 if (*input_line_pointer == '1')
2939 highbit = 0x80000000;
2940 else if (*input_line_pointer != '0')
2941 as_bad (_("expected 0 or 1"));
2942
2943 input_line_pointer++;
2944 if (*input_line_pointer != ',')
2945 as_bad (_("missing comma"));
2946 input_line_pointer++;
2947
2948 #ifdef md_flush_pending_output
2949 md_flush_pending_output ();
2950 #endif
2951
2952 #ifdef md_cons_align
2953 md_cons_align (4);
2954 #endif
2955
2956 mapping_state (MAP_DATA);
2957
2958 expression (&exp);
2959
2960 p = frag_more (4);
2961 md_number_to_chars (p, highbit, 4);
2962 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2963 BFD_RELOC_ARM_PREL31);
2964
2965 demand_empty_rest_of_line ();
2966 }
2967
2968 /* Directives: AEABI stack-unwind tables. */
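/* For illustration (register choices arbitrary), a function annotated
   with these directives typically looks like:

       .fnstart
       push  {r4, r5, lr}
       .save {r4, r5, lr}      @ handled by s_arm_unwind_save_core
       sub   sp, sp, #16
       .pad  #16               @ handled by s_arm_unwind_pad
       ...
       .fnend                  @ emits the index table entry

   .personality, .personalityindex, .handlerdata and .cantunwind may
   also appear between .fnstart and .fnend, as described below.  */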
2969
2970 /* Parse an unwind_fnstart directive. Simply records the current location. */
2971
2972 static void
2973 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2974 {
2975 demand_empty_rest_of_line ();
2976 /* Mark the start of the function. */
2977 unwind.proc_start = expr_build_dot ();
2978
2979 /* Reset the rest of the unwind info. */
2980 unwind.opcode_count = 0;
2981 unwind.table_entry = NULL;
2982 unwind.personality_routine = NULL;
2983 unwind.personality_index = -1;
2984 unwind.frame_size = 0;
2985 unwind.fp_offset = 0;
2986 unwind.fp_reg = 13;
2987 unwind.fp_used = 0;
2988 unwind.sp_restored = 0;
2989 }
2990
2991
2992 /* Parse a handlerdata directive. Creates the exception handling table entry
2993 for the function. */
2994
2995 static void
2996 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2997 {
2998 demand_empty_rest_of_line ();
2999 if (unwind.table_entry)
3000 as_bad (_("duplicate .handlerdata directive"));
3001
3002 create_unwind_entry (1);
3003 }
3004
3005 /* Parse an unwind_fnend directive. Generates the index table entry. */
3006
3007 static void
3008 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3009 {
3010 long where;
3011 char *ptr;
3012 valueT val;
3013
3014 demand_empty_rest_of_line ();
3015
3016 /* Add eh table entry. */
3017 if (unwind.table_entry == NULL)
3018 val = create_unwind_entry (0);
3019 else
3020 val = 0;
3021
3022 /* Add index table entry. This is two words. */
3023 start_unwind_section (unwind.saved_seg, 1);
3024 frag_align (2, 0, 0);
3025 record_alignment (now_seg, 2);
3026
3027 ptr = frag_more (8);
3028 where = frag_now_fix () - 8;
3029
3030 /* Self relative offset of the function start. */
3031 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3032 BFD_RELOC_ARM_PREL31);
3033
3034 /* Indicate dependency on EHABI-defined personality routines to the
3035 linker, if it hasn't been done already. */
3036 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3037 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3038 {
3039 static const char *const name[] = {
3040 "__aeabi_unwind_cpp_pr0",
3041 "__aeabi_unwind_cpp_pr1",
3042 "__aeabi_unwind_cpp_pr2"
3043 };
3044 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3045 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3046 marked_pr_dependency |= 1 << unwind.personality_index;
3047 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3048 = marked_pr_dependency;
3049 }
3050
3051 if (val)
3052 /* Inline exception table entry. */
3053 md_number_to_chars (ptr + 4, val, 4);
3054 else
3055 /* Self relative offset of the table entry. */
3056 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3057 BFD_RELOC_ARM_PREL31);
3058
3059 /* Restore the original section. */
3060 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3061 }
3062
3063
3064 /* Parse an unwind_cantunwind directive. */
3065
3066 static void
3067 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3068 {
3069 demand_empty_rest_of_line ();
3070 if (unwind.personality_routine || unwind.personality_index != -1)
3071 as_bad (_("personality routine specified for cantunwind frame"));
3072
3073 unwind.personality_index = -2;
3074 }
3075
3076
3077 /* Parse a personalityindex directive. */
3078
3079 static void
3080 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3081 {
3082 expressionS exp;
3083
3084 if (unwind.personality_routine || unwind.personality_index != -1)
3085 as_bad (_("duplicate .personalityindex directive"));
3086
3087 expression (&exp);
3088
3089 if (exp.X_op != O_constant
3090 || exp.X_add_number < 0 || exp.X_add_number > 15)
3091 {
3092 as_bad (_("bad personality routine number"));
3093 ignore_rest_of_line ();
3094 return;
3095 }
3096
3097 unwind.personality_index = exp.X_add_number;
3098
3099 demand_empty_rest_of_line ();
3100 }
3101
3102
3103 /* Parse a personality directive. */
3104
3105 static void
3106 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3107 {
3108 char *name, *p, c;
3109
3110 if (unwind.personality_routine || unwind.personality_index != -1)
3111 as_bad (_("duplicate .personality directive"));
3112
3113 name = input_line_pointer;
3114 c = get_symbol_end ();
3115 p = input_line_pointer;
3116 unwind.personality_routine = symbol_find_or_make (name);
3117 *p = c;
3118 demand_empty_rest_of_line ();
3119 }
3120
3121
3122 /* Parse a directive saving core registers. */
3123
3124 static void
3125 s_arm_unwind_save_core (void)
3126 {
3127 valueT op;
3128 long range;
3129 int n;
3130
3131 range = parse_reg_list (&input_line_pointer);
3132 if (range == FAIL)
3133 {
3134 as_bad (_("expected register list"));
3135 ignore_rest_of_line ();
3136 return;
3137 }
3138
3139 demand_empty_rest_of_line ();
3140
3141 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3142 into .unwind_save {..., sp, ...}. We aren't bothered about the value of
3143 ip because it is clobbered by calls. */
3144 if (unwind.sp_restored && unwind.fp_reg == 12
3145 && (range & 0x3000) == 0x1000)
3146 {
3147 unwind.opcode_count--;
3148 unwind.sp_restored = 0;
3149 range = (range | 0x2000) & ~0x1000;
3150 unwind.pending_offset = 0;
3151 }
3152
3153 /* Pop r4-r15. */
3154 if (range & 0xfff0)
3155 {
3156 /* See if we can use the short opcodes. These pop a block of up to 8
3157 registers starting with r4, plus maybe r14. */
3158 for (n = 0; n < 8; n++)
3159 {
3160 /* Break at the first non-saved register. */
3161 if ((range & (1 << (n + 4))) == 0)
3162 break;
3163 }
3164 /* See if there are any other bits set. */
3165 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3166 {
3167 /* Use the long form. */
3168 op = 0x8000 | ((range >> 4) & 0xfff);
3169 add_unwind_opcode (op, 2);
3170 }
3171 else
3172 {
3173 /* Use the short form. */
3174 if (range & 0x4000)
3175 op = 0xa8; /* Pop r14. */
3176 else
3177 op = 0xa0; /* Do not pop r14. */
3178 op |= (n - 1);
3179 add_unwind_opcode (op, 1);
3180 }
3181 }
3182
3183 /* Pop r0-r3. */
3184 if (range & 0xf)
3185 {
3186 op = 0xb100 | (range & 0xf);
3187 add_unwind_opcode (op, 2);
3188 }
3189
3190 /* Record the number of bytes pushed. */
3191 for (n = 0; n < 16; n++)
3192 {
3193 if (range & (1 << n))
3194 unwind.frame_size += 4;
3195 }
3196 }
3197
3198
3199 /* Parse a directive saving FPA registers. */
3200
3201 static void
3202 s_arm_unwind_save_fpa (int reg)
3203 {
3204 expressionS exp;
3205 int num_regs;
3206 valueT op;
3207
3208 /* Get the number of registers to transfer. */
3209 if (skip_past_comma (&input_line_pointer) != FAIL)
3210 expression (&exp);
3211 else
3212 exp.X_op = O_illegal;
3213
3214 if (exp.X_op != O_constant)
3215 {
3216 as_bad (_("expected , <constant>"));
3217 ignore_rest_of_line ();
3218 return;
3219 }
3220
3221 num_regs = exp.X_add_number;
3222
3223 if (num_regs < 1 || num_regs > 4)
3224 {
3225 as_bad (_("number of registers must be in the range [1:4]"));
3226 ignore_rest_of_line ();
3227 return;
3228 }
3229
3230 demand_empty_rest_of_line ();
3231
3232 if (reg == 4)
3233 {
3234 /* Short form. */
3235 op = 0xb4 | (num_regs - 1);
3236 add_unwind_opcode (op, 1);
3237 }
3238 else
3239 {
3240 /* Long form. */
3241 op = 0xc800 | (reg << 4) | (num_regs - 1);
3242 add_unwind_opcode (op, 2);
3243 }
3244 unwind.frame_size += num_regs * 12;
3245 }
3246
3247
3248 /* Parse a directive saving VFP registers. */
3249
3250 static void
3251 s_arm_unwind_save_vfp (void)
3252 {
3253 int count;
3254 unsigned int reg;
3255 valueT op;
3256
3257 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3258 if (count == FAIL)
3259 {
3260 as_bad (_("expected register list"));
3261 ignore_rest_of_line ();
3262 return;
3263 }
3264
3265 demand_empty_rest_of_line ();
3266
3267 if (reg == 8)
3268 {
3269 /* Short form. */
3270 op = 0xb8 | (count - 1);
3271 add_unwind_opcode (op, 1);
3272 }
3273 else
3274 {
3275 /* Long form. */
3276 op = 0xb300 | (reg << 4) | (count - 1);
3277 add_unwind_opcode (op, 2);
3278 }
3279 unwind.frame_size += count * 8 + 4;
3280 }
3281
3282
3283 /* Parse a directive saving iWMMXt data registers. */
3284
3285 static void
3286 s_arm_unwind_save_mmxwr (void)
3287 {
3288 int reg;
3289 int hi_reg;
3290 int i;
3291 unsigned mask = 0;
3292 valueT op;
3293
3294 if (*input_line_pointer == '{')
3295 input_line_pointer++;
3296
3297 do
3298 {
3299 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3300
3301 if (reg == FAIL)
3302 {
3303 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3304 goto error;
3305 }
3306
3307 if (mask >> reg)
3308 as_tsktsk (_("register list not in ascending order"));
3309 mask |= 1 << reg;
3310
3311 if (*input_line_pointer == '-')
3312 {
3313 input_line_pointer++;
3314 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3315 if (hi_reg == FAIL)
3316 {
3317 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3318 goto error;
3319 }
3320 else if (reg >= hi_reg)
3321 {
3322 as_bad (_("bad register range"));
3323 goto error;
3324 }
3325 for (; reg < hi_reg; reg++)
3326 mask |= 1 << reg;
3327 }
3328 }
3329 while (skip_past_comma (&input_line_pointer) != FAIL);
3330
3331 if (*input_line_pointer == '}')
3332 input_line_pointer++;
3333
3334 demand_empty_rest_of_line ();
3335
3336 /* Generate any deferred opcodes because we're going to be looking at
3337 the list. */
3338 flush_pending_unwind ();
3339
3340 for (i = 0; i < 16; i++)
3341 {
3342 if (mask & (1 << i))
3343 unwind.frame_size += 8;
3344 }
3345
3346 /* Attempt to combine with a previous opcode. We do this because gcc
3347 likes to output separate unwind directives for a single block of
3348 registers. */
3349 if (unwind.opcode_count > 0)
3350 {
3351 i = unwind.opcodes[unwind.opcode_count - 1];
3352 if ((i & 0xf8) == 0xc0)
3353 {
3354 i &= 7;
3355 /* Only merge if the blocks are contiguous. */
3356 if (i < 6)
3357 {
3358 if ((mask & 0xfe00) == (1 << 9))
3359 {
3360 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3361 unwind.opcode_count--;
3362 }
3363 }
3364 else if (i == 6 && unwind.opcode_count >= 2)
3365 {
3366 i = unwind.opcodes[unwind.opcode_count - 2];
3367 reg = i >> 4;
3368 i &= 0xf;
3369
3370 op = 0xffff << (reg - 1);
3371 if (reg > 0
3372 && ((mask & op) == (1u << (reg - 1))))
3373 {
3374 op = (1 << (reg + i + 1)) - 1;
3375 op &= ~((1 << reg) - 1);
3376 mask |= op;
3377 unwind.opcode_count -= 2;
3378 }
3379 }
3380 }
3381 }
3382
3383 hi_reg = 15;
3384 /* We want to generate opcodes in the order the registers have been
3385 saved, i.e. descending order. */
3386 for (reg = 15; reg >= -1; reg--)
3387 {
3388 /* Save registers in blocks. */
3389 if (reg < 0
3390 || !(mask & (1 << reg)))
3391 {
3392 /* We found an unsaved reg. Generate opcodes to save the
3393 preceding block. */
3394 if (reg != hi_reg)
3395 {
3396 if (reg == 9)
3397 {
3398 /* Short form. */
3399 op = 0xc0 | (hi_reg - 10);
3400 add_unwind_opcode (op, 1);
3401 }
3402 else
3403 {
3404 /* Long form. */
3405 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3406 add_unwind_opcode (op, 2);
3407 }
3408 }
3409 hi_reg = reg - 1;
3410 }
3411 }
3412
3413 return;
3414 error:
3415 ignore_rest_of_line ();
3416 }
3417
3418 static void
3419 s_arm_unwind_save_mmxwcg (void)
3420 {
3421 int reg;
3422 int hi_reg;
3423 unsigned mask = 0;
3424 valueT op;
3425
3426 if (*input_line_pointer == '{')
3427 input_line_pointer++;
3428
3429 do
3430 {
3431 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3432
3433 if (reg == FAIL)
3434 {
3435 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3436 goto error;
3437 }
3438
3439 reg -= 8;
3440 if (mask >> reg)
3441 as_tsktsk (_("register list not in ascending order"));
3442 mask |= 1 << reg;
3443
3444 if (*input_line_pointer == '-')
3445 {
3446 input_line_pointer++;
3447 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3448 if (hi_reg == FAIL)
3449 {
3450 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3451 goto error;
3452 }
3453 else if (reg >= hi_reg)
3454 {
3455 as_bad (_("bad register range"));
3456 goto error;
3457 }
3458 for (; reg < hi_reg; reg++)
3459 mask |= 1 << reg;
3460 }
3461 }
3462 while (skip_past_comma (&input_line_pointer) != FAIL);
3463
3464 if (*input_line_pointer == '}')
3465 input_line_pointer++;
3466
3467 demand_empty_rest_of_line ();
3468
3469 /* Generate any deferred opcodes because we're going to be looking at
3470 the list. */
3471 flush_pending_unwind ();
3472
3473 for (reg = 0; reg < 16; reg++)
3474 {
3475 if (mask & (1 << reg))
3476 unwind.frame_size += 4;
3477 }
3478 op = 0xc700 | mask;
3479 add_unwind_opcode (op, 2);
3480 return;
3481 error:
3482 ignore_rest_of_line ();
3483 }
3484
3485
3486 /* Parse an unwind_save directive. */
3487
3488 static void
3489 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3490 {
3491 char *peek;
3492 struct reg_entry *reg;
3493 bfd_boolean had_brace = FALSE;
3494
3495 /* Figure out what sort of save we have. */
3496 peek = input_line_pointer;
3497
3498 if (*peek == '{')
3499 {
3500 had_brace = TRUE;
3501 peek++;
3502 }
3503
3504 reg = arm_reg_parse_multi (&peek);
3505
3506 if (!reg)
3507 {
3508 as_bad (_("register expected"));
3509 ignore_rest_of_line ();
3510 return;
3511 }
3512
3513 switch (reg->type)
3514 {
3515 case REG_TYPE_FN:
3516 if (had_brace)
3517 {
3518 as_bad (_("FPA .unwind_save does not take a register list"));
3519 ignore_rest_of_line ();
3520 return;
3521 }
3522 s_arm_unwind_save_fpa (reg->number);
3523 return;
3524
3525 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3526 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3527 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3528 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3529
3530 default:
3531 as_bad (_(".unwind_save does not support this kind of register"));
3532 ignore_rest_of_line ();
3533 }
3534 }
3535
3536
3537 /* Parse an unwind_movsp directive. */
3538
3539 static void
3540 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3541 {
3542 int reg;
3543 valueT op;
3544
3545 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3546 if (reg == FAIL)
3547 {
3548 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3549 ignore_rest_of_line ();
3550 return;
3551 }
3552 demand_empty_rest_of_line ();
3553
3554 if (reg == REG_SP || reg == REG_PC)
3555 {
3556 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3557 return;
3558 }
3559
3560 if (unwind.fp_reg != REG_SP)
3561 as_bad (_("unexpected .unwind_movsp directive"));
3562
3563 /* Generate opcode to restore the value. */
3564 op = 0x90 | reg;
3565 add_unwind_opcode (op, 1);
3566
3567 /* Record the information for later. */
3568 unwind.fp_reg = reg;
3569 unwind.fp_offset = unwind.frame_size;
3570 unwind.sp_restored = 1;
3571 }
3572
3573 /* Parse an unwind_pad directive. */
3574
3575 static void
3576 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3577 {
3578 int offset;
3579
3580 if (immediate_for_directive (&offset) == FAIL)
3581 return;
3582
3583 if (offset & 3)
3584 {
3585 as_bad (_("stack increment must be multiple of 4"));
3586 ignore_rest_of_line ();
3587 return;
3588 }
3589
3590 /* Don't generate any opcodes, just record the details for later. */
3591 unwind.frame_size += offset;
3592 unwind.pending_offset += offset;
3593
3594 demand_empty_rest_of_line ();
3595 }
3596
3597 /* Parse an unwind_setfp directive. */
3598
3599 static void
3600 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3601 {
3602 int sp_reg;
3603 int fp_reg;
3604 int offset;
3605
3606 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3607 if (skip_past_comma (&input_line_pointer) == FAIL)
3608 sp_reg = FAIL;
3609 else
3610 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3611
3612 if (fp_reg == FAIL || sp_reg == FAIL)
3613 {
3614 as_bad (_("expected <reg>, <reg>"));
3615 ignore_rest_of_line ();
3616 return;
3617 }
3618
3619 /* Optional constant. */
3620 if (skip_past_comma (&input_line_pointer) != FAIL)
3621 {
3622 if (immediate_for_directive (&offset) == FAIL)
3623 return;
3624 }
3625 else
3626 offset = 0;
3627
3628 demand_empty_rest_of_line ();
3629
3630 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3631 {
3632 as_bad (_("register must be either sp or set by a previous "
3633 "unwind_movsp directive"));
3634 return;
3635 }
3636
3637 /* Don't generate any opcodes, just record the information for later. */
3638 unwind.fp_reg = fp_reg;
3639 unwind.fp_used = 1;
3640 if (sp_reg == 13)
3641 unwind.fp_offset = unwind.frame_size - offset;
3642 else
3643 unwind.fp_offset -= offset;
3644 }
3645
3646 /* Parse an unwind_raw directive. */
3647
3648 static void
3649 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3650 {
3651 expressionS exp;
3652 /* This is an arbitrary limit. */
3653 unsigned char op[16];
3654 int count;
3655
3656 expression (&exp);
3657 if (exp.X_op == O_constant
3658 && skip_past_comma (&input_line_pointer) != FAIL)
3659 {
3660 unwind.frame_size += exp.X_add_number;
3661 expression (&exp);
3662 }
3663 else
3664 exp.X_op = O_illegal;
3665
3666 if (exp.X_op != O_constant)
3667 {
3668 as_bad (_("expected <offset>, <opcode>"));
3669 ignore_rest_of_line ();
3670 return;
3671 }
3672
3673 count = 0;
3674
3675 /* Parse the opcode. */
3676 for (;;)
3677 {
3678 if (count >= 16)
3679 {
3680 as_bad (_("unwind opcode too long"));
3681 ignore_rest_of_line (); return;
3682 }
3683 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3684 {
3685 as_bad (_("invalid unwind opcode"));
3686 ignore_rest_of_line ();
3687 return;
3688 }
3689 op[count++] = exp.X_add_number;
3690
3691 /* Parse the next byte. */
3692 if (skip_past_comma (&input_line_pointer) == FAIL)
3693 break;
3694
3695 expression (&exp);
3696 }
3697
3698 /* Add the opcode bytes in reverse order. */
3699 while (count--)
3700 add_unwind_opcode (op[count], 1);
3701
3702 demand_empty_rest_of_line ();
3703 }
3704
3705
3706 /* Parse a .eabi_attribute directive. */
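/* For illustration only (the tag numbers below are arbitrary, not a
   recommendation of particular EABI attributes):

       .eabi_attribute 6, 10             @ numeric value
       .eabi_attribute 5, "example-cpu"  @ string value

   As parsed below, tags 4, 5, 32 and odd-numbered tags above 32 take
   string values (tag 32 also takes a leading numeric value); all
   other tags take a numeric constant.  */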
3707
3708 static void
3709 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 expressionS exp;
3712 bfd_boolean is_string;
3713 int tag;
3714 unsigned int i = 0;
3715 char *s = NULL;
3716 char saved_char;
3717
3718 expression (& exp);
3719 if (exp.X_op != O_constant)
3720 goto bad;
3721
3722 tag = exp.X_add_number;
3723 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3724 is_string = 1;
3725 else
3726 is_string = 0;
3727
3728 if (skip_past_comma (&input_line_pointer) == FAIL)
3729 goto bad;
3730 if (tag == 32 || !is_string)
3731 {
3732 expression (& exp);
3733 if (exp.X_op != O_constant)
3734 {
3735 as_bad (_("expected numeric constant"));
3736 ignore_rest_of_line ();
3737 return;
3738 }
3739 i = exp.X_add_number;
3740 }
3741 if (tag == Tag_compatibility
3742 && skip_past_comma (&input_line_pointer) == FAIL)
3743 {
3744 as_bad (_("expected comma"));
3745 ignore_rest_of_line ();
3746 return;
3747 }
3748 if (is_string)
3749 {
3750 skip_whitespace (input_line_pointer);
3751 if (*input_line_pointer != '"')
3752 goto bad_string;
3753 input_line_pointer++;
3754 s = input_line_pointer;
3755 while (*input_line_pointer && *input_line_pointer != '"')
3756 input_line_pointer++;
3757 if (*input_line_pointer != '"')
3758 goto bad_string;
3759 saved_char = *input_line_pointer;
3760 *input_line_pointer = 0;
3761 }
3762 else
3763 {
3764 s = NULL;
3765 saved_char = 0;
3766 }
3767
3768 if (tag == Tag_compatibility)
3769 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3770 else if (is_string)
3771 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3772 else
3773 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3774
3775 if (s)
3776 {
3777 *input_line_pointer = saved_char;
3778 input_line_pointer++;
3779 }
3780 demand_empty_rest_of_line ();
3781 return;
3782 bad_string:
3783 as_bad (_("bad string constant"));
3784 ignore_rest_of_line ();
3785 return;
3786 bad:
3787 as_bad (_("expected <tag> , <value>"));
3788 ignore_rest_of_line ();
3789 }
3790 #endif /* OBJ_ELF */
3791
3792 static void s_arm_arch (int);
3793 static void s_arm_cpu (int);
3794 static void s_arm_fpu (int);
3795
3796 /* This table describes all the machine specific pseudo-ops the assembler
3797 has to support. The fields are:
3798 pseudo-op name without dot
3799 function to call to execute this pseudo-op
3800 Integer arg to pass to the function. */
3801
3802 const pseudo_typeS md_pseudo_table[] =
3803 {
3804 /* Never called because '.req' does not start a line. */
3805 { "req", s_req, 0 },
3806 /* Following two are likewise never called. */
3807 { "dn", s_dn, 0 },
3808 { "qn", s_qn, 0 },
3809 { "unreq", s_unreq, 0 },
3810 { "bss", s_bss, 0 },
3811 { "align", s_align, 0 },
3812 { "arm", s_arm, 0 },
3813 { "thumb", s_thumb, 0 },
3814 { "code", s_code, 0 },
3815 { "force_thumb", s_force_thumb, 0 },
3816 { "thumb_func", s_thumb_func, 0 },
3817 { "thumb_set", s_thumb_set, 0 },
3818 { "even", s_even, 0 },
3819 { "ltorg", s_ltorg, 0 },
3820 { "pool", s_ltorg, 0 },
3821 { "syntax", s_syntax, 0 },
3822 { "cpu", s_arm_cpu, 0 },
3823 { "arch", s_arm_arch, 0 },
3824 { "fpu", s_arm_fpu, 0 },
3825 #ifdef OBJ_ELF
3826 { "word", s_arm_elf_cons, 4 },
3827 { "long", s_arm_elf_cons, 4 },
3828 { "rel31", s_arm_rel31, 0 },
3829 { "fnstart", s_arm_unwind_fnstart, 0 },
3830 { "fnend", s_arm_unwind_fnend, 0 },
3831 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3832 { "personality", s_arm_unwind_personality, 0 },
3833 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3834 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3835 { "save", s_arm_unwind_save, 0 },
3836 { "movsp", s_arm_unwind_movsp, 0 },
3837 { "pad", s_arm_unwind_pad, 0 },
3838 { "setfp", s_arm_unwind_setfp, 0 },
3839 { "unwind_raw", s_arm_unwind_raw, 0 },
3840 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3841 #else
3842 { "word", cons, 4},
3843 #endif
3844 { "extend", float_cons, 'x' },
3845 { "ldouble", float_cons, 'x' },
3846 { "packed", float_cons, 'p' },
3847 { 0, 0, 0 }
3848 };
3849 \f
3850 /* Parser functions used exclusively in instruction operands. */
3851
3852 /* Generic immediate-value read function for use in insn parsing.
3853 STR points to the beginning of the immediate (the leading #);
3854 VAL receives the value; if the value is outside [MIN, MAX]
3855 issue an error. PREFIX_OPT is true if the immediate prefix is
3856 optional. */
3857
3858 static int
3859 parse_immediate (char **str, int *val, int min, int max,
3860 bfd_boolean prefix_opt)
3861 {
3862 expressionS exp;
3863 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3864 if (exp.X_op != O_constant)
3865 {
3866 inst.error = _("constant expression required");
3867 return FAIL;
3868 }
3869
3870 if (exp.X_add_number < min || exp.X_add_number > max)
3871 {
3872 inst.error = _("immediate value out of range");
3873 return FAIL;
3874 }
3875
3876 *val = exp.X_add_number;
3877 return SUCCESS;
3878 }
3879
3880 /* Less-generic immediate-value read function with the possibility of loading a
3881 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3882 instructions. Puts the result directly in inst.operands[i]. */
3883
3884 static int
3885 parse_big_immediate (char **str, int i)
3886 {
3887 expressionS exp;
3888 char *ptr = *str;
3889
3890 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3891
3892 if (exp.X_op == O_constant)
3893 inst.operands[i].imm = exp.X_add_number;
3894 else if (exp.X_op == O_big
3895 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3896 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3897 {
3898 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3899 /* Bignums have their least significant bits in
3900 generic_bignum[0]. Make sure we put 32 bits in imm and
3901 32 bits in reg, in a (hopefully) portable way. */
3902 assert (parts != 0);
3903 inst.operands[i].imm = 0;
3904 for (j = 0; j < parts; j++, idx++)
3905 inst.operands[i].imm |= generic_bignum[idx]
3906 << (LITTLENUM_NUMBER_OF_BITS * j);
3907 inst.operands[i].reg = 0;
3908 for (j = 0; j < parts; j++, idx++)
3909 inst.operands[i].reg |= generic_bignum[idx]
3910 << (LITTLENUM_NUMBER_OF_BITS * j);
3911 inst.operands[i].regisimm = 1;
3912 }
3913 else
3914 return FAIL;
3915
3916 *str = ptr;
3917
3918 return SUCCESS;
3919 }
3920
3921 /* Returns the pseudo-register number of an FPA immediate constant,
3922 or FAIL if there isn't a valid constant here. */
3923
3924 static int
3925 parse_fpa_immediate (char ** str)
3926 {
3927 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3928 char * save_in;
3929 expressionS exp;
3930 int i;
3931 int j;
3932
3933 /* First try to match exact strings; this is to guarantee
3934 that some formats will work even for cross assembly. */
3935
3936 for (i = 0; fp_const[i]; i++)
3937 {
3938 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3939 {
3940 char *start = *str;
3941
3942 *str += strlen (fp_const[i]);
3943 if (is_end_of_line[(unsigned char) **str])
3944 return i + 8;
3945 *str = start;
3946 }
3947 }
3948
3949 /* Just because we didn't get a match doesn't mean that the constant
3950 isn't valid, just that it is in a format that we don't
3951 automatically recognize. Try parsing it with the standard
3952 expression routines. */
3953
3954 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3955
3956 /* Look for a raw floating point number. */
3957 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3958 && is_end_of_line[(unsigned char) *save_in])
3959 {
3960 for (i = 0; i < NUM_FLOAT_VALS; i++)
3961 {
3962 for (j = 0; j < MAX_LITTLENUMS; j++)
3963 {
3964 if (words[j] != fp_values[i][j])
3965 break;
3966 }
3967
3968 if (j == MAX_LITTLENUMS)
3969 {
3970 *str = save_in;
3971 return i + 8;
3972 }
3973 }
3974 }
3975
3976 /* Try to parse a more complex expression; this will probably fail
3977 unless the code uses a floating point prefix (e.g. "0f"). */
3978 save_in = input_line_pointer;
3979 input_line_pointer = *str;
3980 if (expression (&exp) == absolute_section
3981 && exp.X_op == O_big
3982 && exp.X_add_number < 0)
3983 {
3984 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3985 Ditto for 15. */
3986 if (gen_to_words (words, 5, (long) 15) == 0)
3987 {
3988 for (i = 0; i < NUM_FLOAT_VALS; i++)
3989 {
3990 for (j = 0; j < MAX_LITTLENUMS; j++)
3991 {
3992 if (words[j] != fp_values[i][j])
3993 break;
3994 }
3995
3996 if (j == MAX_LITTLENUMS)
3997 {
3998 *str = input_line_pointer;
3999 input_line_pointer = save_in;
4000 return i + 8;
4001 }
4002 }
4003 }
4004 }
4005
4006 *str = input_line_pointer;
4007 input_line_pointer = save_in;
4008 inst.error = _("invalid FPA immediate expression");
4009 return FAIL;
4010 }
4011
4012 /* Returns 1 if a number has "quarter-precision" float format
4013 0baBbbbbbc defgh000 00000000 00000000. */
4014
4015 static int
4016 is_quarter_float (unsigned imm)
4017 {
4018 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4019 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4020 }
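/* A worked example of the test above: 1.0f is 0x3F800000, whose low
   19 bits are zero and whose sign/exponent bits match the 0x3e000000
   pattern, so it is accepted; a value such as 0.1f, whose fraction
   does not end in 19 zero bits, is rejected.  */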
4021
4022 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4023 0baBbbbbbc defgh000 00000000 00000000.
4024 The minus-zero case needs special handling, since it can't be encoded in the
4025 "quarter-precision" float format, but can nonetheless be loaded as an integer
4026 constant. */
4027
4028 static unsigned
4029 parse_qfloat_immediate (char **ccp, int *immed)
4030 {
4031 char *str = *ccp;
4032 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4033
4034 skip_past_char (&str, '#');
4035
4036 if ((str = atof_ieee (str, 's', words)) != NULL)
4037 {
4038 unsigned fpword = 0;
4039 int i;
4040
4041 /* Our FP word must be 32 bits (single-precision FP). */
4042 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4043 {
4044 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4045 fpword |= words[i];
4046 }
4047
4048 if (is_quarter_float (fpword) || fpword == 0x80000000)
4049 *immed = fpword;
4050 else
4051 return FAIL;
4052
4053 *ccp = str;
4054
4055 return SUCCESS;
4056 }
4057
4058 return FAIL;
4059 }
4060
4061 /* Shift operands. */
4062 enum shift_kind
4063 {
4064 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4065 };
4066
4067 struct asm_shift_name
4068 {
4069 const char *name;
4070 enum shift_kind kind;
4071 };
4072
4073 /* Third argument to parse_shift. */
4074 enum parse_shift_mode
4075 {
4076 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4077 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4078 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4079 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4080 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4081 };
4082
4083 /* Parse a <shift> specifier on an ARM data processing instruction.
4084 This has three forms:
4085
4086 (LSL|LSR|ASL|ASR|ROR) Rs
4087 (LSL|LSR|ASL|ASR|ROR) #imm
4088 RRX
4089
4090 Note that ASL is assimilated to LSL in the instruction encoding, and
4091 RRX to ROR #0 (which cannot be written as such). */
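/* For illustration (registers and shift counts arbitrary), the three
   forms accepted below appear in operands such as:

       add  r0, r1, r2, lsl #2    @ immediate shift
       add  r0, r1, r2, lsl r3    @ register-specified shift
       add  r0, r1, r2, rrx       @ rotate with extend, no amount  */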
4092
4093 static int
4094 parse_shift (char **str, int i, enum parse_shift_mode mode)
4095 {
4096 const struct asm_shift_name *shift_name;
4097 enum shift_kind shift;
4098 char *s = *str;
4099 char *p = s;
4100 int reg;
4101
4102 for (p = *str; ISALPHA (*p); p++)
4103 ;
4104
4105 if (p == *str)
4106 {
4107 inst.error = _("shift expression expected");
4108 return FAIL;
4109 }
4110
4111 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4112
4113 if (shift_name == NULL)
4114 {
4115 inst.error = _("shift expression expected");
4116 return FAIL;
4117 }
4118
4119 shift = shift_name->kind;
4120
4121 switch (mode)
4122 {
4123 case NO_SHIFT_RESTRICT:
4124 case SHIFT_IMMEDIATE: break;
4125
4126 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4127 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4128 {
4129 inst.error = _("'LSL' or 'ASR' required");
4130 return FAIL;
4131 }
4132 break;
4133
4134 case SHIFT_LSL_IMMEDIATE:
4135 if (shift != SHIFT_LSL)
4136 {
4137 inst.error = _("'LSL' required");
4138 return FAIL;
4139 }
4140 break;
4141
4142 case SHIFT_ASR_IMMEDIATE:
4143 if (shift != SHIFT_ASR)
4144 {
4145 inst.error = _("'ASR' required");
4146 return FAIL;
4147 }
4148 break;
4149
4150 default: abort ();
4151 }
4152
4153 if (shift != SHIFT_RRX)
4154 {
4155 /* Whitespace can appear here if the next thing is a bare digit. */
4156 skip_whitespace (p);
4157
4158 if (mode == NO_SHIFT_RESTRICT
4159 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4160 {
4161 inst.operands[i].imm = reg;
4162 inst.operands[i].immisreg = 1;
4163 }
4164 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4165 return FAIL;
4166 }
4167 inst.operands[i].shift_kind = shift;
4168 inst.operands[i].shifted = 1;
4169 *str = p;
4170 return SUCCESS;
4171 }
4172
4173 /* Parse a <shifter_operand> for an ARM data processing instruction:
4174
4175 #<immediate>
4176 #<immediate>, <rotate>
4177 <Rm>
4178 <Rm>, <shift>
4179
4180 where <shift> is defined by parse_shift above, and <rotate> is a
4181 multiple of 2 between 0 and 30. Validation of immediate operands
4182 is deferred to md_apply_fix. */
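/* A worked example of the explicit-rotation form (values chosen only
   for illustration): "#0x3f, 16" denotes 0x3f rotated right by 16,
   i.e. 0x003f0000.  The code below stores the pre-rotated value and
   md_apply_fix later re-encodes it as an 8-bit constant plus a 4-bit
   rotation field.  */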
4183
4184 static int
4185 parse_shifter_operand (char **str, int i)
4186 {
4187 int value;
4188 expressionS expr;
4189
4190 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4191 {
4192 inst.operands[i].reg = value;
4193 inst.operands[i].isreg = 1;
4194
4195 /* parse_shift will override this if appropriate */
4196 inst.reloc.exp.X_op = O_constant;
4197 inst.reloc.exp.X_add_number = 0;
4198
4199 if (skip_past_comma (str) == FAIL)
4200 return SUCCESS;
4201
4202 /* Shift operation on register. */
4203 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4204 }
4205
4206 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4207 return FAIL;
4208
4209 if (skip_past_comma (str) == SUCCESS)
4210 {
4211 /* #x, y -- i.e. explicit rotation by y. */
4212 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4213 return FAIL;
4214
4215 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4216 {
4217 inst.error = _("constant expression expected");
4218 return FAIL;
4219 }
4220
4221 value = expr.X_add_number;
4222 if (value < 0 || value > 30 || value % 2 != 0)
4223 {
4224 inst.error = _("invalid rotation");
4225 return FAIL;
4226 }
4227 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4228 {
4229 inst.error = _("invalid constant");
4230 return FAIL;
4231 }
4232
4233 /* Convert to decoded value. md_apply_fix will put it back. */
4234 inst.reloc.exp.X_add_number
4235 = (((inst.reloc.exp.X_add_number << (32 - value))
4236 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4237 }
4238
4239 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4240 inst.reloc.pc_rel = 0;
4241 return SUCCESS;
4242 }
4243
4244 /* Parse all forms of an ARM address expression. Information is written
4245 to inst.operands[i] and/or inst.reloc.
4246
4247 Preindexed addressing (.preind=1):
4248
4249 [Rn, #offset] .reg=Rn .reloc.exp=offset
4250 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4251 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4252 .shift_kind=shift .reloc.exp=shift_imm
4253
4254 These three may have a trailing ! which causes .writeback to be set also.
4255
4256 Postindexed addressing (.postind=1, .writeback=1):
4257
4258 [Rn], #offset .reg=Rn .reloc.exp=offset
4259 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4260 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4261 .shift_kind=shift .reloc.exp=shift_imm
4262
4263 Unindexed addressing (.preind=0, .postind=0):
4264
4265 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4266
4267 Other:
4268
4269 [Rn]{!} shorthand for [Rn,#0]{!}
4270 =immediate .isreg=0 .reloc.exp=immediate
4271 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4272
4273 It is the caller's responsibility to check for addressing modes not
4274 supported by the instruction, and to set inst.reloc.type. */
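/* A few concrete examples of the forms described above (illustrative
   only; which of them a given instruction actually allows is checked by
   its encoding routine):

       ldr r0, [r1, #4]            @ preindexed, immediate offset
       ldr r0, [r1, -r2, lsl #2]!  @ preindexed, scaled register, writeback
       ldr r0, [r1], #4            @ postindexed
       ldc p5, c0, [r1], {8}       @ unindexed, option field
       ldr r0, =0xdeadbeef         @ load-constant pseudo-op
       ldr r0, local_label         @ PC-relative form  */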
4275
4276 static int
4277 parse_address (char **str, int i)
4278 {
4279 char *p = *str;
4280 int reg;
4281
4282 if (skip_past_char (&p, '[') == FAIL)
4283 {
4284 if (skip_past_char (&p, '=') == FAIL)
4285 {
4286 /* bare address - translate to PC-relative offset */
4287 inst.reloc.pc_rel = 1;
4288 inst.operands[i].reg = REG_PC;
4289 inst.operands[i].isreg = 1;
4290 inst.operands[i].preind = 1;
4291 }
4292 /* else a load-constant pseudo op, no special treatment needed here */
4293
4294 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4295 return FAIL;
4296
4297 *str = p;
4298 return SUCCESS;
4299 }
4300
4301 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4302 {
4303 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4304 return FAIL;
4305 }
4306 inst.operands[i].reg = reg;
4307 inst.operands[i].isreg = 1;
4308
4309 if (skip_past_comma (&p) == SUCCESS)
4310 {
4311 inst.operands[i].preind = 1;
4312
4313 if (*p == '+') p++;
4314 else if (*p == '-') p++, inst.operands[i].negative = 1;
4315
4316 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4317 {
4318 inst.operands[i].imm = reg;
4319 inst.operands[i].immisreg = 1;
4320
4321 if (skip_past_comma (&p) == SUCCESS)
4322 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4323 return FAIL;
4324 }
4325 else if (skip_past_char (&p, ':') == SUCCESS)
4326 {
4327 /* FIXME: '@' should be used here, but it's filtered out by generic
4328 code before we get to see it here. This may be subject to
4329 change. */
4330 expressionS exp;
4331 my_get_expression (&exp, &p, GE_NO_PREFIX);
4332 if (exp.X_op != O_constant)
4333 {
4334 inst.error = _("alignment must be constant");
4335 return FAIL;
4336 }
4337 inst.operands[i].imm = exp.X_add_number << 8;
4338 inst.operands[i].immisalign = 1;
4339 /* Alignments are not pre-indexes. */
4340 inst.operands[i].preind = 0;
4341 }
4342 else
4343 {
4344 if (inst.operands[i].negative)
4345 {
4346 inst.operands[i].negative = 0;
4347 p--;
4348 }
4349 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4350 return FAIL;
4351 }
4352 }
4353
4354 if (skip_past_char (&p, ']') == FAIL)
4355 {
4356 inst.error = _("']' expected");
4357 return FAIL;
4358 }
4359
4360 if (skip_past_char (&p, '!') == SUCCESS)
4361 inst.operands[i].writeback = 1;
4362
4363 else if (skip_past_comma (&p) == SUCCESS)
4364 {
4365 if (skip_past_char (&p, '{') == SUCCESS)
4366 {
4367 /* [Rn], {expr} - unindexed, with option */
4368 if (parse_immediate (&p, &inst.operands[i].imm,
4369 0, 255, TRUE) == FAIL)
4370 return FAIL;
4371
4372 if (skip_past_char (&p, '}') == FAIL)
4373 {
4374 inst.error = _("'}' expected at end of 'option' field");
4375 return FAIL;
4376 }
4377 if (inst.operands[i].preind)
4378 {
4379 inst.error = _("cannot combine index with option");
4380 return FAIL;
4381 }
4382 *str = p;
4383 return SUCCESS;
4384 }
4385 else
4386 {
4387 inst.operands[i].postind = 1;
4388 inst.operands[i].writeback = 1;
4389
4390 if (inst.operands[i].preind)
4391 {
4392 inst.error = _("cannot combine pre- and post-indexing");
4393 return FAIL;
4394 }
4395
4396 if (*p == '+') p++;
4397 else if (*p == '-') p++, inst.operands[i].negative = 1;
4398
4399 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4400 {
4401 /* We might be using the immediate for alignment already. If we
4402 are, OR the register number into the low-order bits. */
4403 if (inst.operands[i].immisalign)
4404 inst.operands[i].imm |= reg;
4405 else
4406 inst.operands[i].imm = reg;
4407 inst.operands[i].immisreg = 1;
4408
4409 if (skip_past_comma (&p) == SUCCESS)
4410 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4411 return FAIL;
4412 }
4413 else
4414 {
4415 if (inst.operands[i].negative)
4416 {
4417 inst.operands[i].negative = 0;
4418 p--;
4419 }
4420 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4421 return FAIL;
4422 }
4423 }
4424 }
4425
4426 /* If at this point neither .preind nor .postind is set, we have a
4427 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4428 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4429 {
4430 inst.operands[i].preind = 1;
4431 inst.reloc.exp.X_op = O_constant;
4432 inst.reloc.exp.X_add_number = 0;
4433 }
4434 *str = p;
4435 return SUCCESS;
4436 }
4437
4438 /* Parse an operand for a MOVW or MOVT instruction. */
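/* Typical operands (illustrative): "movw r0, #0x1234" takes the plain
   constant path below, while "movw r0, #:lower16:sym" and
   "movt r0, #:upper16:sym" select the MOVW/MOVT relocations and leave
   the value to be filled in by the relocation.  */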
4439 static int
4440 parse_half (char **str)
4441 {
4442 char * p;
4443
4444 p = *str;
4445 skip_past_char (&p, '#');
4446 if (strncasecmp (p, ":lower16:", 9) == 0)
4447 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4448 else if (strncasecmp (p, ":upper16:", 9) == 0)
4449 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4450
4451 if (inst.reloc.type != BFD_RELOC_UNUSED)
4452 {
4453 p += 9;
4454       skip_whitespace (p);
4455 }
4456
4457 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4458 return FAIL;
4459
4460 if (inst.reloc.type == BFD_RELOC_UNUSED)
4461 {
4462 if (inst.reloc.exp.X_op != O_constant)
4463 {
4464 inst.error = _("constant expression expected");
4465 return FAIL;
4466 }
4467 if (inst.reloc.exp.X_add_number < 0
4468 || inst.reloc.exp.X_add_number > 0xffff)
4469 {
4470 inst.error = _("immediate value out of range");
4471 return FAIL;
4472 }
4473 }
4474 *str = p;
4475 return SUCCESS;
4476 }
4477
4478 /* Miscellaneous. */
4479
4480 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4481 or a bitmask suitable to be or-ed into the ARM msr instruction. */
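/* Typical MSR operands handled here (illustrative): "CPSR_fc",
   "spsr_cxsf", or a v7-M special register name such as "PRIMASK".
   A bare "CPSR" or "SPSR" is treated as if "_fc" had been written.  */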
4482 static int
4483 parse_psr (char **str)
4484 {
4485 char *p;
4486 unsigned long psr_field;
4487 const struct asm_psr *psr;
4488 char *start;
4489
4490   /* CPSR and SPSR may also be written in lowercase.  This is just a
4491      convenience feature for ease of use and backwards compatibility.  */
4492 p = *str;
4493 if (strncasecmp (p, "SPSR", 4) == 0)
4494 psr_field = SPSR_BIT;
4495 else if (strncasecmp (p, "CPSR", 4) == 0)
4496 psr_field = 0;
4497 else
4498 {
4499 start = p;
4500 do
4501 p++;
4502 while (ISALNUM (*p) || *p == '_');
4503
4504 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4505 if (!psr)
4506 return FAIL;
4507
4508 *str = p;
4509 return psr->field;
4510 }
4511
4512 p += 4;
4513 if (*p == '_')
4514 {
4515 /* A suffix follows. */
4516 p++;
4517 start = p;
4518
4519 do
4520 p++;
4521 while (ISALNUM (*p) || *p == '_');
4522
4523 psr = hash_find_n (arm_psr_hsh, start, p - start);
4524 if (!psr)
4525 goto error;
4526
4527 psr_field |= psr->field;
4528 }
4529 else
4530 {
4531 if (ISALNUM (*p))
4532 goto error; /* Garbage after "[CS]PSR". */
4533
4534 psr_field |= (PSR_c | PSR_f);
4535 }
4536 *str = p;
4537 return psr_field;
4538
4539 error:
4540 inst.error = _("flag for {c}psr instruction expected");
4541 return FAIL;
4542 }
4543
4544 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4545 value suitable for splatting into the AIF field of the instruction. */
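/* For example (illustrative), "cpsie if" yields 0x3 (the I and F bits)
   and "cpsid aif" yields 0x7; the flag letters may appear in any order
   and in either case.  */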
4546
4547 static int
4548 parse_cps_flags (char **str)
4549 {
4550 int val = 0;
4551 int saw_a_flag = 0;
4552 char *s = *str;
4553
4554 for (;;)
4555 switch (*s++)
4556 {
4557 case '\0': case ',':
4558 goto done;
4559
4560 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4561 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4562 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4563
4564 default:
4565 inst.error = _("unrecognized CPS flag");
4566 return FAIL;
4567 }
4568
4569 done:
4570 if (saw_a_flag == 0)
4571 {
4572 inst.error = _("missing CPS flags");
4573 return FAIL;
4574 }
4575
4576 *str = s - 1;
4577 return val;
4578 }
4579
4580 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4581 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4582
4583 static int
4584 parse_endian_specifier (char **str)
4585 {
4586 int little_endian;
4587 char *s = *str;
4588
4589   if (strncasecmp (s, "BE", 2) == 0)
4590     little_endian = 0;
4591   else if (strncasecmp (s, "LE", 2) == 0)
4592     little_endian = 1;
4593 else
4594 {
4595 inst.error = _("valid endian specifiers are be or le");
4596 return FAIL;
4597 }
4598
4599 if (ISALNUM (s[2]) || s[2] == '_')
4600 {
4601 inst.error = _("valid endian specifiers are be or le");
4602 return FAIL;
4603 }
4604
4605 *str = s + 2;
4606 return little_endian;
4607 }
4608
4609 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  The value returned
4610    is suitable for poking into the rotate field of an sxt or sxta
4611    instruction, or FAIL on error.  */
4612
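/* For example (illustrative), "sxtah r0, r1, r2, ROR #16" reaches this
   routine with "ROR #16" and gets back 0x2, which the caller shifts
   into the instruction's rotate field.  */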
4613 static int
4614 parse_ror (char **str)
4615 {
4616 int rot;
4617 char *s = *str;
4618
4619 if (strncasecmp (s, "ROR", 3) == 0)
4620 s += 3;
4621 else
4622 {
4623 inst.error = _("missing rotation field after comma");
4624 return FAIL;
4625 }
4626
4627 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4628 return FAIL;
4629
4630 switch (rot)
4631 {
4632 case 0: *str = s; return 0x0;
4633 case 8: *str = s; return 0x1;
4634 case 16: *str = s; return 0x2;
4635 case 24: *str = s; return 0x3;
4636
4637 default:
4638 inst.error = _("rotation can only be 0, 8, 16, or 24");
4639 return FAIL;
4640 }
4641 }
4642
4643 /* Parse a conditional code (from conds[] below). The value returned is in the
4644 range 0 .. 14, or FAIL. */
4645 static int
4646 parse_cond (char **str)
4647 {
4648 char *p, *q;
4649 const struct asm_cond *c;
4650
4651 p = q = *str;
4652 while (ISALPHA (*q))
4653 q++;
4654
4655 c = hash_find_n (arm_cond_hsh, p, q - p);
4656 if (!c)
4657 {
4658 inst.error = _("condition required");
4659 return FAIL;
4660 }
4661
4662 *str = q;
4663 return c->value;
4664 }
4665
4666 /* Parse an option for a barrier instruction. Returns the encoding for the
4667 option, or FAIL. */
4668 static int
4669 parse_barrier (char **str)
4670 {
4671 char *p, *q;
4672 const struct asm_barrier_opt *o;
4673
4674 p = q = *str;
4675 while (ISALPHA (*q))
4676 q++;
4677
4678 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4679 if (!o)
4680 return FAIL;
4681
4682 *str = q;
4683 return o->value;
4684 }
4685
4686 /* Parse the operands of a table branch instruction. Similar to a memory
4687 operand. */
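/* For example (illustrative): "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]".
   The only shift accepted is LSL #1, which is checked below.  */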
4688 static int
4689 parse_tb (char **str)
4690 {
4691 char * p = *str;
4692 int reg;
4693
4694 if (skip_past_char (&p, '[') == FAIL)
4695 {
4696 inst.error = _("'[' expected");
4697 return FAIL;
4698 }
4699
4700 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4701 {
4702 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4703 return FAIL;
4704 }
4705 inst.operands[0].reg = reg;
4706
4707 if (skip_past_comma (&p) == FAIL)
4708 {
4709 inst.error = _("',' expected");
4710 return FAIL;
4711 }
4712
4713 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4714 {
4715 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4716 return FAIL;
4717 }
4718 inst.operands[0].imm = reg;
4719
4720 if (skip_past_comma (&p) == SUCCESS)
4721 {
4722 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4723 return FAIL;
4724 if (inst.reloc.exp.X_add_number != 1)
4725 {
4726 inst.error = _("invalid shift");
4727 return FAIL;
4728 }
4729 inst.operands[0].shifted = 1;
4730 }
4731
4732 if (skip_past_char (&p, ']') == FAIL)
4733 {
4734 inst.error = _("']' expected");
4735 return FAIL;
4736 }
4737 *str = p;
4738 return SUCCESS;
4739 }
4740
4741 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4742 information on the types the operands can take and how they are encoded.
4743 Note particularly the abuse of ".regisimm" to signify a Neon register.
4744 Up to three operands may be read; this function handles setting the
4745 ".present" field for each operand itself.
4746 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4747 else returns FAIL. */
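/* A rough map of the cases referred to below (illustrative syntax only):
     case 0: vmov q0, q1          case 1: vmov d0, d1
     case 2: vmov.i32 q0, #1      case 3: vmov.i32 d0, #1
     case 4: vmov.32 d0[1], r2    case 5: vmov d0, r2, r3
     case 6: vmov.32 r2, d0[1]    case 7: vmov r2, r3, d0  */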
4748
4749 static int
4750 parse_neon_mov (char **str, int *which_operand)
4751 {
4752 int i = *which_operand, val;
4753 enum arm_reg_type rtype;
4754 char *ptr = *str;
4755 struct neon_type_el optype;
4756
4757 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4758 {
4759 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4760 inst.operands[i].reg = val;
4761 inst.operands[i].isscalar = 1;
4762 inst.operands[i].vectype = optype;
4763 inst.operands[i++].present = 1;
4764
4765 if (skip_past_comma (&ptr) == FAIL)
4766 goto wanted_comma;
4767
4768 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4769 goto wanted_arm;
4770
4771 inst.operands[i].reg = val;
4772 inst.operands[i].isreg = 1;
4773 inst.operands[i].present = 1;
4774 }
4775 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4776 != FAIL)
4777 {
4778 /* Cases 0, 1, 2, 3, 5 (D only). */
4779 if (skip_past_comma (&ptr) == FAIL)
4780 goto wanted_comma;
4781
4782 inst.operands[i].reg = val;
4783 inst.operands[i].isreg = 1;
4784 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4785 inst.operands[i].vectype = optype;
4786 inst.operands[i++].present = 1;
4787
4788 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4789 {
4790 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4791 inst.operands[i-1].regisimm = 1;
4792 inst.operands[i].reg = val;
4793 inst.operands[i].isreg = 1;
4794 inst.operands[i++].present = 1;
4795
4796 if (rtype == REG_TYPE_NQ)
4797 {
4798 first_error (_("can't use Neon quad register here"));
4799 return FAIL;
4800 }
4801 if (skip_past_comma (&ptr) == FAIL)
4802 goto wanted_comma;
4803 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4804 goto wanted_arm;
4805 inst.operands[i].reg = val;
4806 inst.operands[i].isreg = 1;
4807 inst.operands[i].present = 1;
4808 }
4809 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4810 {
4811 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4812 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4813 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4814 goto bad_cond;
4815 }
4816 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4817 {
4818 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4819 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4820 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4821 goto bad_cond;
4822 }
4823 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4824 != FAIL)
4825 {
4826 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4827 Case 1: VMOV<c><q> <Dd>, <Dm> */
4828 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4829 goto bad_cond;
4830
4831 inst.operands[i].reg = val;
4832 inst.operands[i].isreg = 1;
4833 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4834 inst.operands[i].vectype = optype;
4835 inst.operands[i].present = 1;
4836 }
4837 else
4838 {
4839 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4840 return FAIL;
4841 }
4842 }
4843 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4844 {
4845 /* Cases 6, 7. */
4846 inst.operands[i].reg = val;
4847 inst.operands[i].isreg = 1;
4848 inst.operands[i++].present = 1;
4849
4850 if (skip_past_comma (&ptr) == FAIL)
4851 goto wanted_comma;
4852
4853 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4854 {
4855 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4856 inst.operands[i].reg = val;
4857 inst.operands[i].isscalar = 1;
4858 inst.operands[i].present = 1;
4859 inst.operands[i].vectype = optype;
4860 }
4861 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4862 {
4863 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4864 inst.operands[i].reg = val;
4865 inst.operands[i].isreg = 1;
4866 inst.operands[i++].present = 1;
4867
4868 if (skip_past_comma (&ptr) == FAIL)
4869 goto wanted_comma;
4870
4871 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4872 == FAIL)
4873 {
4874 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4875 return FAIL;
4876 }
4877
4878 inst.operands[i].reg = val;
4879 inst.operands[i].isreg = 1;
4880 inst.operands[i].regisimm = 1;
4881 inst.operands[i].vectype = optype;
4882 inst.operands[i].present = 1;
4883 }
4884 }
4885 else
4886 {
4887 first_error (_("parse error"));
4888 return FAIL;
4889 }
4890
4891 /* Successfully parsed the operands. Update args. */
4892 *which_operand = i;
4893 *str = ptr;
4894 return SUCCESS;
4895
4896 wanted_comma:
4897 first_error (_("expected comma"));
4898 return FAIL;
4899
4900 wanted_arm:
4901 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4902 return FAIL;
4903
4904 bad_cond:
4905 first_error (_("instruction cannot be conditionalized"));
4906 return FAIL;
4907 }
4908
4909 /* Matcher codes for parse_operands. */
4910 enum operand_parse_code
4911 {
4912 OP_stop, /* end of line */
4913
4914 OP_RR, /* ARM register */
4915 OP_RRnpc, /* ARM register, not r15 */
4916 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4917 OP_RRw, /* ARM register, not r15, optional trailing ! */
4918 OP_RCP, /* Coprocessor number */
4919 OP_RCN, /* Coprocessor register */
4920 OP_RF, /* FPA register */
4921 OP_RVS, /* VFP single precision register */
4922 OP_RVD, /* VFP double precision register (0..15) */
4923 OP_RND, /* Neon double precision register (0..31) */
4924 OP_RNQ, /* Neon quad precision register */
4925 OP_RNDQ, /* Neon double or quad precision register */
4926 OP_RNSC, /* Neon scalar D[X] */
4927 OP_RVC, /* VFP control register */
4928 OP_RMF, /* Maverick F register */
4929 OP_RMD, /* Maverick D register */
4930 OP_RMFX, /* Maverick FX register */
4931 OP_RMDX, /* Maverick DX register */
4932 OP_RMAX, /* Maverick AX register */
4933 OP_RMDS, /* Maverick DSPSC register */
4934 OP_RIWR, /* iWMMXt wR register */
4935 OP_RIWC, /* iWMMXt wC register */
4936 OP_RIWG, /* iWMMXt wCG register */
4937 OP_RXA, /* XScale accumulator register */
4938
4939 OP_REGLST, /* ARM register list */
4940 OP_VRSLST, /* VFP single-precision register list */
4941 OP_VRDLST, /* VFP double-precision register list */
4942 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4943 OP_NSTRLST, /* Neon element/structure list */
4944
4945 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4946 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4947 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4948 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4949 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4950 OP_VMOV, /* Neon VMOV operands. */
4951 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4952 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4953
4954 OP_I0, /* immediate zero */
4955 OP_I7, /* immediate value 0 .. 7 */
4956 OP_I15, /* 0 .. 15 */
4957 OP_I16, /* 1 .. 16 */
4958 OP_I16z, /* 0 .. 16 */
4959 OP_I31, /* 0 .. 31 */
4960 OP_I31w, /* 0 .. 31, optional trailing ! */
4961 OP_I32, /* 1 .. 32 */
4962 OP_I32z, /* 0 .. 32 */
4963 OP_I63, /* 0 .. 63 */
4964 OP_I63s, /* -64 .. 63 */
4965 OP_I64, /* 1 .. 64 */
4966 OP_I64z, /* 0 .. 64 */
4967 OP_I255, /* 0 .. 255 */
4968
4969 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4970 OP_I7b, /* 0 .. 7 */
4971 OP_I15b, /* 0 .. 15 */
4972 OP_I31b, /* 0 .. 31 */
4973
4974 OP_SH, /* shifter operand */
4975 OP_ADDR, /* Memory address expression (any mode) */
4976 OP_EXP, /* arbitrary expression */
4977 OP_EXPi, /* same, with optional immediate prefix */
4978 OP_EXPr, /* same, with optional relocation suffix */
4979 OP_HALF, /* 0 .. 65535 or low/high reloc. */
4980
4981 OP_CPSF, /* CPS flags */
4982 OP_ENDI, /* Endianness specifier */
4983 OP_PSR, /* CPSR/SPSR mask for msr */
4984 OP_COND, /* conditional code */
4985 OP_TB, /* Table branch. */
4986
4987 OP_RRnpc_I0, /* ARM register or literal 0 */
4988 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4989 OP_RR_EXi, /* ARM register or expression with imm prefix */
4990 OP_RF_IF, /* FPA register or immediate */
4991 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4992
4993 /* Optional operands. */
4994 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4995 OP_oI31b, /* 0 .. 31 */
4996 OP_oI32b, /* 1 .. 32 */
4997 OP_oIffffb, /* 0 .. 65535 */
4998 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4999
5000 OP_oRR, /* ARM register */
5001 OP_oRRnpc, /* ARM register, not the PC */
5002 OP_oRND, /* Optional Neon double precision register */
5003 OP_oRNQ, /* Optional Neon quad precision register */
5004 OP_oRNDQ, /* Optional Neon double or quad precision register */
5005 OP_oSHll, /* LSL immediate */
5006 OP_oSHar, /* ASR immediate */
5007 OP_oSHllar, /* LSL or ASR immediate */
5008 OP_oROR, /* ROR 0/8/16/24 */
5009 OP_oBARRIER, /* Option argument for a barrier instruction. */
5010
5011 OP_FIRST_OPTIONAL = OP_oI7b
5012 };
5013
5014 /* Generic instruction operand parser. This does no encoding and no
5015 semantic validation; it merely squirrels values away in the inst
5016 structure. Returns SUCCESS or FAIL depending on whether the
5017 specified grammar matched. */
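/* For instance (illustrative only; the real patterns live in the insns[]
   table), a three-operand data-processing entry might hand this routine
   the pattern { OP_RR, OP_oRR, OP_SH, OP_stop }: for "add r0, r1, #1"
   this fills operands[0] and [1] as registers and leaves the immediate
   in inst.reloc.exp for the encoder.  */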
5018 static int
5019 parse_operands (char *str, const unsigned char *pattern)
5020 {
5021 unsigned const char *upat = pattern;
5022 char *backtrack_pos = 0;
5023 const char *backtrack_error = 0;
5024 int i, val, backtrack_index = 0;
5025 enum arm_reg_type rtype;
5026
5027 #define po_char_or_fail(chr) do { \
5028 if (skip_past_char (&str, chr) == FAIL) \
5029 goto bad_args; \
5030 } while (0)
5031
5032 #define po_reg_or_fail(regtype) do { \
5033 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5034 &inst.operands[i].vectype); \
5035 if (val == FAIL) \
5036 { \
5037 first_error (_(reg_expected_msgs[regtype])); \
5038 goto failure; \
5039 } \
5040 inst.operands[i].reg = val; \
5041 inst.operands[i].isreg = 1; \
5042 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5043 } while (0)
5044
5045 #define po_reg_or_goto(regtype, label) do { \
5046 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5047 &inst.operands[i].vectype); \
5048 if (val == FAIL) \
5049 goto label; \
5050 \
5051 inst.operands[i].reg = val; \
5052 inst.operands[i].isreg = 1; \
5053 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5054 } while (0)
5055
5056 #define po_imm_or_fail(min, max, popt) do { \
5057 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5058 goto failure; \
5059 inst.operands[i].imm = val; \
5060 } while (0)
5061
5062 #define po_scalar_or_goto(elsz, label) do { \
5063 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5064 if (val == FAIL) \
5065 goto label; \
5066 inst.operands[i].reg = val; \
5067 inst.operands[i].isscalar = 1; \
5068 } while (0)
5069
5070 #define po_misc_or_fail(expr) do { \
5071 if (expr) \
5072 goto failure; \
5073 } while (0)
5074
5075 skip_whitespace (str);
5076
5077 for (i = 0; upat[i] != OP_stop; i++)
5078 {
5079 if (upat[i] >= OP_FIRST_OPTIONAL)
5080 {
5081 /* Remember where we are in case we need to backtrack. */
5082 assert (!backtrack_pos);
5083 backtrack_pos = str;
5084 backtrack_error = inst.error;
5085 backtrack_index = i;
5086 }
5087
5088 if (i > 0)
5089 po_char_or_fail (',');
5090
5091 switch (upat[i])
5092 {
5093 /* Registers */
5094 case OP_oRRnpc:
5095 case OP_RRnpc:
5096 case OP_oRR:
5097 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5098 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5099 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5100 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5101 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5102 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5103 case OP_oRND:
5104 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5105 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5106 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5107 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5108 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5109 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5110 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5111 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5112 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5113 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5114 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5115 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5116 case OP_oRNQ:
5117 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5118 case OP_oRNDQ:
5119 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5120
5121 /* Neon scalar. Using an element size of 8 means that some invalid
5122 scalars are accepted here, so deal with those in later code. */
5123 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5124
5125 /* WARNING: We can expand to two operands here. This has the potential
5126 to totally confuse the backtracking mechanism! It will be OK at
5127 least as long as we don't try to use optional args as well,
5128 though. */
5129 case OP_NILO:
5130 {
5131 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5132 i++;
5133 skip_past_comma (&str);
5134 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5135 break;
5136 one_reg_only:
5137 /* Optional register operand was omitted. Unfortunately, it's in
5138 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5139 here (this is a bit grotty). */
5140 inst.operands[i] = inst.operands[i-1];
5141 inst.operands[i-1].present = 0;
5142 break;
5143 try_imm:
5144 /* Immediate gets verified properly later, so accept any now. */
5145 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5146 }
5147 break;
5148
5149 case OP_RNDQ_I0:
5150 {
5151 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5152 break;
5153 try_imm0:
5154 po_imm_or_fail (0, 0, TRUE);
5155 }
5156 break;
5157
5158 case OP_RR_RNSC:
5159 {
5160 po_scalar_or_goto (8, try_rr);
5161 break;
5162 try_rr:
5163 po_reg_or_fail (REG_TYPE_RN);
5164 }
5165 break;
5166
5167 case OP_RNDQ_RNSC:
5168 {
5169 po_scalar_or_goto (8, try_ndq);
5170 break;
5171 try_ndq:
5172 po_reg_or_fail (REG_TYPE_NDQ);
5173 }
5174 break;
5175
5176 case OP_RND_RNSC:
5177 {
5178 po_scalar_or_goto (8, try_vfd);
5179 break;
5180 try_vfd:
5181 po_reg_or_fail (REG_TYPE_VFD);
5182 }
5183 break;
5184
5185 case OP_VMOV:
5186 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5187 not careful then bad things might happen. */
5188 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5189 break;
5190
5191 case OP_RNDQ_IMVNb:
5192 {
5193 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5194 break;
5195 try_mvnimm:
5196 /* There's a possibility of getting a 64-bit immediate here, so
5197 we need special handling. */
5198 if (parse_big_immediate (&str, i) == FAIL)
5199 {
5200 inst.error = _("immediate value is out of range");
5201 goto failure;
5202 }
5203 }
5204 break;
5205
5206 case OP_RNDQ_I63b:
5207 {
5208 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5209 break;
5210 try_shimm:
5211 po_imm_or_fail (0, 63, TRUE);
5212 }
5213 break;
5214
5215 case OP_RRnpcb:
5216 po_char_or_fail ('[');
5217 po_reg_or_fail (REG_TYPE_RN);
5218 po_char_or_fail (']');
5219 break;
5220
5221 case OP_RRw:
5222 po_reg_or_fail (REG_TYPE_RN);
5223 if (skip_past_char (&str, '!') == SUCCESS)
5224 inst.operands[i].writeback = 1;
5225 break;
5226
5227 /* Immediates */
5228 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5229 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5230 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5231 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5232 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5233 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5234 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5235 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5236 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5237 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5238 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5239 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5240
5241 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5242 case OP_oI7b:
5243 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5244 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5245 case OP_oI31b:
5246 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5247 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5248 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5249
5250 /* Immediate variants */
5251 case OP_oI255c:
5252 po_char_or_fail ('{');
5253 po_imm_or_fail (0, 255, TRUE);
5254 po_char_or_fail ('}');
5255 break;
5256
5257 case OP_I31w:
5258 /* The expression parser chokes on a trailing !, so we have
5259 to find it first and zap it. */
5260 {
5261 char *s = str;
5262 while (*s && *s != ',')
5263 s++;
5264 if (s[-1] == '!')
5265 {
5266 s[-1] = '\0';
5267 inst.operands[i].writeback = 1;
5268 }
5269 po_imm_or_fail (0, 31, TRUE);
5270 if (str == s - 1)
5271 str = s;
5272 }
5273 break;
5274
5275 /* Expressions */
5276 case OP_EXPi: EXPi:
5277 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5278 GE_OPT_PREFIX));
5279 break;
5280
5281 case OP_EXP:
5282 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5283 GE_NO_PREFIX));
5284 break;
5285
5286 case OP_EXPr: EXPr:
5287 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5288 GE_NO_PREFIX));
5289 if (inst.reloc.exp.X_op == O_symbol)
5290 {
5291 val = parse_reloc (&str);
5292 if (val == -1)
5293 {
5294 inst.error = _("unrecognized relocation suffix");
5295 goto failure;
5296 }
5297 else if (val != BFD_RELOC_UNUSED)
5298 {
5299 inst.operands[i].imm = val;
5300 inst.operands[i].hasreloc = 1;
5301 }
5302 }
5303 break;
5304
5305 /* Operand for MOVW or MOVT. */
5306 case OP_HALF:
5307 po_misc_or_fail (parse_half (&str));
5308 break;
5309
5310 /* Register or expression */
5311 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5312 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5313
5314 /* Register or immediate */
5315 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5316 I0: po_imm_or_fail (0, 0, FALSE); break;
5317
5318 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5319 IF:
5320 if (!is_immediate_prefix (*str))
5321 goto bad_args;
5322 str++;
5323 val = parse_fpa_immediate (&str);
5324 if (val == FAIL)
5325 goto failure;
5326 /* FPA immediates are encoded as registers 8-15.
5327 parse_fpa_immediate has already applied the offset. */
5328 inst.operands[i].reg = val;
5329 inst.operands[i].isreg = 1;
5330 break;
5331
5332 /* Two kinds of register */
5333 case OP_RIWR_RIWC:
5334 {
5335 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
5336 	    if (!rege
5337 		|| (rege->type != REG_TYPE_MMXWR
5338 		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
5339 {
5340 inst.error = _("iWMMXt data or control register expected");
5341 goto failure;
5342 }
5343 inst.operands[i].reg = rege->number;
5344 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5345 }
5346 break;
5347
5348 /* Misc */
5349 case OP_CPSF: val = parse_cps_flags (&str); break;
5350 case OP_ENDI: val = parse_endian_specifier (&str); break;
5351 case OP_oROR: val = parse_ror (&str); break;
5352 case OP_PSR: val = parse_psr (&str); break;
5353 case OP_COND: val = parse_cond (&str); break;
5354 case OP_oBARRIER:val = parse_barrier (&str); break;
5355
5356 case OP_TB:
5357 po_misc_or_fail (parse_tb (&str));
5358 break;
5359
5360 /* Register lists */
5361 case OP_REGLST:
5362 val = parse_reg_list (&str);
5363 if (*str == '^')
5364 {
5365 inst.operands[1].writeback = 1;
5366 str++;
5367 }
5368 break;
5369
5370 case OP_VRSLST:
5371 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5372 break;
5373
5374 case OP_VRDLST:
5375 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5376 break;
5377
5378 case OP_NRDLST:
5379 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5380 REGLIST_NEON_D);
5381 break;
5382
5383 case OP_NSTRLST:
5384 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5385 &inst.operands[i].vectype);
5386 break;
5387
5388 /* Addressing modes */
5389 case OP_ADDR:
5390 po_misc_or_fail (parse_address (&str, i));
5391 break;
5392
5393 case OP_SH:
5394 po_misc_or_fail (parse_shifter_operand (&str, i));
5395 break;
5396
5397 case OP_oSHll:
5398 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5399 break;
5400
5401 case OP_oSHar:
5402 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5403 break;
5404
5405 case OP_oSHllar:
5406 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5407 break;
5408
5409 default:
5410 as_fatal ("unhandled operand code %d", upat[i]);
5411 }
5412
5413 /* Various value-based sanity checks and shared operations. We
5414 do not signal immediate failures for the register constraints;
5415 this allows a syntax error to take precedence. */
5416 switch (upat[i])
5417 {
5418 case OP_oRRnpc:
5419 case OP_RRnpc:
5420 case OP_RRnpcb:
5421 case OP_RRw:
5422 case OP_RRnpc_I0:
5423 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5424 inst.error = BAD_PC;
5425 break;
5426
5427 case OP_CPSF:
5428 case OP_ENDI:
5429 case OP_oROR:
5430 case OP_PSR:
5431 case OP_COND:
5432 case OP_oBARRIER:
5433 case OP_REGLST:
5434 case OP_VRSLST:
5435 case OP_VRDLST:
5436 case OP_NRDLST:
5437 case OP_NSTRLST:
5438 if (val == FAIL)
5439 goto failure;
5440 inst.operands[i].imm = val;
5441 break;
5442
5443 default:
5444 break;
5445 }
5446
5447 /* If we get here, this operand was successfully parsed. */
5448 inst.operands[i].present = 1;
5449 continue;
5450
5451 bad_args:
5452 inst.error = BAD_ARGS;
5453
5454 failure:
5455 if (!backtrack_pos)
5456 {
5457 /* The parse routine should already have set inst.error, but set a
5458 	     default here just in case.  */
5459 if (!inst.error)
5460 inst.error = _("syntax error");
5461 return FAIL;
5462 }
5463
5464 /* Do not backtrack over a trailing optional argument that
5465 absorbed some text. We will only fail again, with the
5466 'garbage following instruction' error message, which is
5467 probably less helpful than the current one. */
5468 if (backtrack_index == i && backtrack_pos != str
5469 && upat[i+1] == OP_stop)
5470 {
5471 if (!inst.error)
5472 inst.error = _("syntax error");
5473 return FAIL;
5474 }
5475
5476 /* Try again, skipping the optional argument at backtrack_pos. */
5477 str = backtrack_pos;
5478 inst.error = backtrack_error;
5479 inst.operands[backtrack_index].present = 0;
5480 i = backtrack_index;
5481 backtrack_pos = 0;
5482 }
5483
5484 /* Check that we have parsed all the arguments. */
5485 if (*str != '\0' && !inst.error)
5486 inst.error = _("garbage following instruction");
5487
5488 return inst.error ? FAIL : SUCCESS;
5489 }
5490
5491 #undef po_char_or_fail
5492 #undef po_reg_or_fail
5493 #undef po_reg_or_goto
5494 #undef po_imm_or_fail
5495 #undef po_scalar_or_goto
#undef po_misc_or_fail
5496 \f
5497 /* Shorthand macro for instruction encoding functions issuing errors. */
5498 #define constraint(expr, err) do { \
5499 if (expr) \
5500 { \
5501 inst.error = err; \
5502 return; \
5503 } \
5504 } while (0)
5505
5506 /* Functions for operand encoding. ARM, then Thumb. */
5507
5508 #define rotate_left(v, n) (v << n | v >> (32 - n))
5509
5510 /* If VAL can be encoded in the immediate field of an ARM instruction,
5511 return the encoded form. Otherwise, return FAIL. */
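/* Worked example (illustrative): 0x0000ff00 can be produced by rotating
   0xff left by 24 bits, so the function returns 0xff | (24 << 7) = 0xcff,
   i.e. imm8 = 0xff with a rotate-right count of 2 * 12 = 24.  */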
5512
5513 static unsigned int
5514 encode_arm_immediate (unsigned int val)
5515 {
5516 unsigned int a, i;
5517
5518 for (i = 0; i < 32; i += 2)
5519 if ((a = rotate_left (val, i)) <= 0xff)
5520 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5521
5522 return FAIL;
5523 }
5524
5525 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5526 return the encoded form. Otherwise, return FAIL. */
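/* Worked examples (illustrative): 0x00ab00ab matches the "00XY00XY"
   replication pattern and encodes as 0x1ab; 0x00004000 is 0x80 rotated
   right by 25 and encodes as (25 << 7) | 0x00 = 0xc80 (the leading 1 of
   the 8-bit constant is implicit in that form).  */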
5527 static unsigned int
5528 encode_thumb32_immediate (unsigned int val)
5529 {
5530 unsigned int a, i;
5531
5532 if (val <= 0xff)
5533 return val;
5534
5535 for (i = 1; i <= 24; i++)
5536 {
5537 a = val >> i;
5538 if ((val & ~(0xff << i)) == 0)
5539 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5540 }
5541
5542 a = val & 0xff;
5543 if (val == ((a << 16) | a))
5544 return 0x100 | a;
5545 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5546 return 0x300 | a;
5547
5548 a = val & 0xff00;
5549 if (val == ((a << 16) | a))
5550 return 0x200 | (a >> 8);
5551
5552 return FAIL;
5553 }

5554 /* Encode a VFP SP or DP register number into inst.instruction.  */
5555
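/* For example (illustrative), d20 in the Dm position is split into its
   low four bits (4 -> bits 3:0) and its high bit (1 -> bit 5), giving
   0x24; registers above d15 are only accepted when the VFPv3 feature is
   available, as checked below.  */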
5556 static void
5557 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5558 {
5559 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5560 && reg > 15)
5561 {
5562 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5563 {
5564 if (thumb_mode)
5565 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5566 fpu_vfp_ext_v3);
5567 else
5568 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5569 fpu_vfp_ext_v3);
5570 }
5571 else
5572 {
5573 first_error (_("D register out of range for selected VFP version"));
5574 return;
5575 }
5576 }
5577
5578 switch (pos)
5579 {
5580 case VFP_REG_Sd:
5581 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5582 break;
5583
5584 case VFP_REG_Sn:
5585 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5586 break;
5587
5588 case VFP_REG_Sm:
5589 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5590 break;
5591
5592 case VFP_REG_Dd:
5593 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5594 break;
5595
5596 case VFP_REG_Dn:
5597 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5598 break;
5599
5600 case VFP_REG_Dm:
5601 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5602 break;
5603
5604 default:
5605 abort ();
5606 }
5607 }
5608
5609 /* Encode a <shift> in an ARM-format instruction. The immediate,
5610 if any, is handled by md_apply_fix. */
5611 static void
5612 encode_arm_shift (int i)
5613 {
5614 if (inst.operands[i].shift_kind == SHIFT_RRX)
5615 inst.instruction |= SHIFT_ROR << 5;
5616 else
5617 {
5618 inst.instruction |= inst.operands[i].shift_kind << 5;
5619 if (inst.operands[i].immisreg)
5620 {
5621 inst.instruction |= SHIFT_BY_REG;
5622 inst.instruction |= inst.operands[i].imm << 8;
5623 }
5624 else
5625 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5626 }
5627 }
5628
5629 static void
5630 encode_arm_shifter_operand (int i)
5631 {
5632 if (inst.operands[i].isreg)
5633 {
5634 inst.instruction |= inst.operands[i].reg;
5635 encode_arm_shift (i);
5636 }
5637 else
5638 inst.instruction |= INST_IMMEDIATE;
5639 }
5640
5641 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5642 static void
5643 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5644 {
5645 assert (inst.operands[i].isreg);
5646 inst.instruction |= inst.operands[i].reg << 16;
5647
5648 if (inst.operands[i].preind)
5649 {
5650 if (is_t)
5651 {
5652 inst.error = _("instruction does not accept preindexed addressing");
5653 return;
5654 }
5655 inst.instruction |= PRE_INDEX;
5656 if (inst.operands[i].writeback)
5657 inst.instruction |= WRITE_BACK;
5658
5659 }
5660 else if (inst.operands[i].postind)
5661 {
5662 assert (inst.operands[i].writeback);
5663 if (is_t)
5664 inst.instruction |= WRITE_BACK;
5665 }
5666 else /* unindexed - only for coprocessor */
5667 {
5668 inst.error = _("instruction does not accept unindexed addressing");
5669 return;
5670 }
5671
5672 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5673 && (((inst.instruction & 0x000f0000) >> 16)
5674 == ((inst.instruction & 0x0000f000) >> 12)))
5675 as_warn ((inst.instruction & LOAD_BIT)
5676 ? _("destination register same as write-back base")
5677 : _("source register same as write-back base"));
5678 }
5679
5680 /* inst.operands[i] was set up by parse_address. Encode it into an
5681 ARM-format mode 2 load or store instruction. If is_t is true,
5682 reject forms that cannot be used with a T instruction (i.e. not
5683 post-indexed). */
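/* For example (illustrative), "ldr r0, [r1, r2, lsl #2]" takes the
   register-offset path below (INST_IMMEDIATE set, shift type and amount
   encoded alongside Rm), while "ldr r0, [r1, #4]" leaves the immediate
   to the BFD_RELOC_ARM_OFFSET_IMM fixup.  */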
5684 static void
5685 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5686 {
5687 encode_arm_addr_mode_common (i, is_t);
5688
5689 if (inst.operands[i].immisreg)
5690 {
5691 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5692 inst.instruction |= inst.operands[i].imm;
5693 if (!inst.operands[i].negative)
5694 inst.instruction |= INDEX_UP;
5695 if (inst.operands[i].shifted)
5696 {
5697 if (inst.operands[i].shift_kind == SHIFT_RRX)
5698 inst.instruction |= SHIFT_ROR << 5;
5699 else
5700 {
5701 inst.instruction |= inst.operands[i].shift_kind << 5;
5702 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5703 }
5704 }
5705 }
5706 else /* immediate offset in inst.reloc */
5707 {
5708 if (inst.reloc.type == BFD_RELOC_UNUSED)
5709 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5710 }
5711 }
5712
5713 /* inst.operands[i] was set up by parse_address. Encode it into an
5714 ARM-format mode 3 load or store instruction. Reject forms that
5715 cannot be used with such instructions. If is_t is true, reject
5716 forms that cannot be used with a T instruction (i.e. not
5717 post-indexed). */
5718 static void
5719 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5720 {
5721 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5722 {
5723 inst.error = _("instruction does not accept scaled register index");
5724 return;
5725 }
5726
5727 encode_arm_addr_mode_common (i, is_t);
5728
5729 if (inst.operands[i].immisreg)
5730 {
5731 inst.instruction |= inst.operands[i].imm;
5732 if (!inst.operands[i].negative)
5733 inst.instruction |= INDEX_UP;
5734 }
5735 else /* immediate offset in inst.reloc */
5736 {
5737 inst.instruction |= HWOFFSET_IMM;
5738 if (inst.reloc.type == BFD_RELOC_UNUSED)
5739 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5740 }
5741 }
5742
5743 /* inst.operands[i] was set up by parse_address. Encode it into an
5744 ARM-format instruction. Reject all forms which cannot be encoded
5745 into a coprocessor load/store instruction. If wb_ok is false,
5746 reject use of writeback; if unind_ok is false, reject use of
5747 unindexed addressing. If reloc_override is not 0, use it instead
5748 of BFD_ARM_CP_OFF_IMM. */
5749
5750 static int
5751 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5752 {
5753 inst.instruction |= inst.operands[i].reg << 16;
5754
5755 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5756
5757 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5758 {
5759 assert (!inst.operands[i].writeback);
5760 if (!unind_ok)
5761 {
5762 inst.error = _("instruction does not support unindexed addressing");
5763 return FAIL;
5764 }
5765 inst.instruction |= inst.operands[i].imm;
5766 inst.instruction |= INDEX_UP;
5767 return SUCCESS;
5768 }
5769
5770 if (inst.operands[i].preind)
5771 inst.instruction |= PRE_INDEX;
5772
5773 if (inst.operands[i].writeback)
5774 {
5775 if (inst.operands[i].reg == REG_PC)
5776 {
5777 inst.error = _("pc may not be used with write-back");
5778 return FAIL;
5779 }
5780 if (!wb_ok)
5781 {
5782 inst.error = _("instruction does not support writeback");
5783 return FAIL;
5784 }
5785 inst.instruction |= WRITE_BACK;
5786 }
5787
5788 if (reloc_override)
5789 inst.reloc.type = reloc_override;
5790 else if (thumb_mode)
5791 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5792 else
5793 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5794 return SUCCESS;
5795 }
5796
5797 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5798 Determine whether it can be performed with a move instruction; if
5799 it can, convert inst.instruction to that move instruction and
5800 return 1; if it can't, convert inst.instruction to a literal-pool
5801 load and return 0. If this is not a valid thing to do in the
5802 current context, set inst.error and return 1.
5803
5804 inst.operands[i] describes the destination register. */
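/* For example (illustrative, ARM case): "ldr r0, =0xff" becomes
   "mov r0, #0xff", "ldr r0, =0xffffff00" becomes "mvn r0, #0xff", and a
   value that fits neither form (e.g. "ldr r0, =0x12345678") is placed in
   the literal pool and loaded PC-relative.  */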
5805
5806 static int
5807 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5808 {
5809 unsigned long tbit;
5810
5811 if (thumb_p)
5812 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5813 else
5814 tbit = LOAD_BIT;
5815
5816 if ((inst.instruction & tbit) == 0)
5817 {
5818 inst.error = _("invalid pseudo operation");
5819 return 1;
5820 }
5821 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5822 {
5823 inst.error = _("constant expression expected");
5824 return 1;
5825 }
5826 if (inst.reloc.exp.X_op == O_constant)
5827 {
5828 if (thumb_p)
5829 {
5830 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5831 {
5832 /* This can be done with a mov(1) instruction. */
5833 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5834 inst.instruction |= inst.reloc.exp.X_add_number;
5835 return 1;
5836 }
5837 }
5838 else
5839 {
5840 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5841 if (value != FAIL)
5842 {
5843 /* This can be done with a mov instruction. */
5844 inst.instruction &= LITERAL_MASK;
5845 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5846 inst.instruction |= value & 0xfff;
5847 return 1;
5848 }
5849
5850 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5851 if (value != FAIL)
5852 {
5853 /* This can be done with a mvn instruction. */
5854 inst.instruction &= LITERAL_MASK;
5855 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5856 inst.instruction |= value & 0xfff;
5857 return 1;
5858 }
5859 }
5860 }
5861
5862 if (add_to_lit_pool () == FAIL)
5863 {
5864 inst.error = _("literal pool insertion failed");
5865 return 1;
5866 }
5867 inst.operands[1].reg = REG_PC;
5868 inst.operands[1].isreg = 1;
5869 inst.operands[1].preind = 1;
5870 inst.reloc.pc_rel = 1;
5871 inst.reloc.type = (thumb_p
5872 ? BFD_RELOC_ARM_THUMB_OFFSET
5873 : (mode_3
5874 ? BFD_RELOC_ARM_HWLITERAL
5875 : BFD_RELOC_ARM_LITERAL));
5876 return 0;
5877 }
5878
5879 /* Functions for instruction encoding, sorted by subarchitecture.
5880 First some generics; their names are taken from the conventional
5881 bit positions for register arguments in ARM format instructions. */
5882
5883 static void
5884 do_noargs (void)
5885 {
5886 }
5887
5888 static void
5889 do_rd (void)
5890 {
5891 inst.instruction |= inst.operands[0].reg << 12;
5892 }
5893
5894 static void
5895 do_rd_rm (void)
5896 {
5897 inst.instruction |= inst.operands[0].reg << 12;
5898 inst.instruction |= inst.operands[1].reg;
5899 }
5900
5901 static void
5902 do_rd_rn (void)
5903 {
5904 inst.instruction |= inst.operands[0].reg << 12;
5905 inst.instruction |= inst.operands[1].reg << 16;
5906 }
5907
5908 static void
5909 do_rn_rd (void)
5910 {
5911 inst.instruction |= inst.operands[0].reg << 16;
5912 inst.instruction |= inst.operands[1].reg << 12;
5913 }
5914
5915 static void
5916 do_rd_rm_rn (void)
5917 {
5918 unsigned Rn = inst.operands[2].reg;
5919 /* Enforce restrictions on SWP instruction. */
5920 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5921 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5922 _("Rn must not overlap other operands"));
5923 inst.instruction |= inst.operands[0].reg << 12;
5924 inst.instruction |= inst.operands[1].reg;
5925 inst.instruction |= Rn << 16;
5926 }
5927
5928 static void
5929 do_rd_rn_rm (void)
5930 {
5931 inst.instruction |= inst.operands[0].reg << 12;
5932 inst.instruction |= inst.operands[1].reg << 16;
5933 inst.instruction |= inst.operands[2].reg;
5934 }
5935
5936 static void
5937 do_rm_rd_rn (void)
5938 {
5939 inst.instruction |= inst.operands[0].reg;
5940 inst.instruction |= inst.operands[1].reg << 12;
5941 inst.instruction |= inst.operands[2].reg << 16;
5942 }
5943
5944 static void
5945 do_imm0 (void)
5946 {
5947 inst.instruction |= inst.operands[0].imm;
5948 }
5949
5950 static void
5951 do_rd_cpaddr (void)
5952 {
5953 inst.instruction |= inst.operands[0].reg << 12;
5954 encode_arm_cp_address (1, TRUE, TRUE, 0);
5955 }
5956
5957 /* ARM instructions, in alphabetical order by function name (except
5958 that wrapper functions appear immediately after the function they
5959 wrap). */
5960
5961 /* This is a pseudo-op of the form "adr rd, label" to be converted
5962 into a relative address of the form "add rd, pc, #label-.-8". */
5963
5964 static void
5965 do_adr (void)
5966 {
5967 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5968
5969 /* Frag hacking will turn this into a sub instruction if the offset turns
5970 out to be negative. */
5971 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5972 inst.reloc.pc_rel = 1;
5973 inst.reloc.exp.X_add_number -= 8;
5974 }
5975
5976 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5977 into a relative address of the form:
5978    add rd, pc, #low(label-.-8)
5979    add rd, rd, #high(label-.-8)   */
5980
5981 static void
5982 do_adrl (void)
5983 {
5984 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5985
5986 /* Frag hacking will turn this into a sub instruction if the offset turns
5987 out to be negative. */
5988 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5989 inst.reloc.pc_rel = 1;
5990 inst.size = INSN_SIZE * 2;
5991 inst.reloc.exp.X_add_number -= 8;
5992 }
5993
5994 static void
5995 do_arit (void)
5996 {
5997 if (!inst.operands[1].present)
5998 inst.operands[1].reg = inst.operands[0].reg;
5999 inst.instruction |= inst.operands[0].reg << 12;
6000 inst.instruction |= inst.operands[1].reg << 16;
6001 encode_arm_shifter_operand (2);
6002 }
6003
6004 static void
6005 do_barrier (void)
6006 {
6007 if (inst.operands[0].present)
6008 {
6009 constraint ((inst.instruction & 0xf0) != 0x40
6010 && inst.operands[0].imm != 0xf,
6011 		  _("bad barrier type"));
6012 inst.instruction |= inst.operands[0].imm;
6013 }
6014 else
6015 inst.instruction |= 0xf;
6016 }
6017
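/* For example (illustrative), "bfc r0, #4, #8" clears bits 4..11 of r0:
   the LSB (4) lands in bits 11:7 and the MSB bit index (4 + 8 - 1 = 11)
   in bits 20:16, as done below.  */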
6018 static void
6019 do_bfc (void)
6020 {
6021 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6022 constraint (msb > 32, _("bit-field extends past end of register"));
6023 /* The instruction encoding stores the LSB and MSB,
6024 not the LSB and width. */
6025 inst.instruction |= inst.operands[0].reg << 12;
6026 inst.instruction |= inst.operands[1].imm << 7;
6027 inst.instruction |= (msb - 1) << 16;
6028 }
6029
6030 static void
6031 do_bfi (void)
6032 {
6033 unsigned int msb;
6034
6035 /* #0 in second position is alternative syntax for bfc, which is
6036 the same instruction but with REG_PC in the Rm field. */
6037 if (!inst.operands[1].isreg)
6038 inst.operands[1].reg = REG_PC;
6039
6040 msb = inst.operands[2].imm + inst.operands[3].imm;
6041 constraint (msb > 32, _("bit-field extends past end of register"));
6042 /* The instruction encoding stores the LSB and MSB,
6043 not the LSB and width. */
6044 inst.instruction |= inst.operands[0].reg << 12;
6045 inst.instruction |= inst.operands[1].reg;
6046 inst.instruction |= inst.operands[2].imm << 7;
6047 inst.instruction |= (msb - 1) << 16;
6048 }
6049
6050 static void
6051 do_bfx (void)
6052 {
6053 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6054 _("bit-field extends past end of register"));
6055 inst.instruction |= inst.operands[0].reg << 12;
6056 inst.instruction |= inst.operands[1].reg;
6057 inst.instruction |= inst.operands[2].imm << 7;
6058 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6059 }
6060
6061 /* ARM V5 breakpoint instruction (argument parse)
6062 BKPT <16 bit unsigned immediate>
6063 Instruction is not conditional.
6064 The bit pattern given in insns[] has the COND_ALWAYS condition,
6065 and it is an error if the caller tried to override that. */
6066
6067 static void
6068 do_bkpt (void)
6069 {
6070 /* Top 12 of 16 bits to bits 19:8. */
6071 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6072
6073 /* Bottom 4 of 16 bits to bits 3:0. */
6074 inst.instruction |= inst.operands[0].imm & 0xf;
6075 }
6076
6077 static void
6078 encode_branch (int default_reloc)
6079 {
6080 if (inst.operands[0].hasreloc)
6081 {
6082 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6083 _("the only suffix valid here is '(plt)'"));
6084 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6085 }
6086 else
6087 {
6088 inst.reloc.type = default_reloc;
6089 }
6090 inst.reloc.pc_rel = 1;
6091 }
6092
6093 static void
6094 do_branch (void)
6095 {
6096 #ifdef OBJ_ELF
6097 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6098 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6099 else
6100 #endif
6101 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6102 }
6103
6104 static void
6105 do_bl (void)
6106 {
6107 #ifdef OBJ_ELF
6108 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6109 {
6110 if (inst.cond == COND_ALWAYS)
6111 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6112 else
6113 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6114 }
6115 else
6116 #endif
6117 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6118 }
6119
6120 /* ARM V5 branch-link-exchange instruction (argument parse)
6121 BLX <target_addr> ie BLX(1)
6122 BLX{<condition>} <Rm> ie BLX(2)
6123 Unfortunately, there are two different opcodes for this mnemonic.
6124 So, the insns[].value is not used, and the code here zaps values
6125 into inst.instruction.
6126 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6127
6128 static void
6129 do_blx (void)
6130 {
6131 if (inst.operands[0].isreg)
6132 {
6133 /* Arg is a register; the opcode provided by insns[] is correct.
6134 It is not illegal to do "blx pc", just useless. */
6135 if (inst.operands[0].reg == REG_PC)
6136 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6137
6138 inst.instruction |= inst.operands[0].reg;
6139 }
6140 else
6141 {
6142 /* Arg is an address; this instruction cannot be executed
6143 conditionally, and the opcode must be adjusted. */
6144 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6145 inst.instruction = 0xfa000000;
6146 #ifdef OBJ_ELF
6147 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6148 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6149 else
6150 #endif
6151 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6152 }
6153 }
6154
6155 static void
6156 do_bx (void)
6157 {
6158 if (inst.operands[0].reg == REG_PC)
6159 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6160
6161 inst.instruction |= inst.operands[0].reg;
6162 }
6163
6164
6165 /* ARM v5TEJ. Jump to Jazelle code. */
6166
6167 static void
6168 do_bxj (void)
6169 {
6170 if (inst.operands[0].reg == REG_PC)
6171 as_tsktsk (_("use of r15 in bxj is not really useful"));
6172
6173 inst.instruction |= inst.operands[0].reg;
6174 }
6175
6176 /* Co-processor data operation:
6177 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6178 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6179 static void
6180 do_cdp (void)
6181 {
6182 inst.instruction |= inst.operands[0].reg << 8;
6183 inst.instruction |= inst.operands[1].imm << 20;
6184 inst.instruction |= inst.operands[2].reg << 12;
6185 inst.instruction |= inst.operands[3].reg << 16;
6186 inst.instruction |= inst.operands[4].reg;
6187 inst.instruction |= inst.operands[5].imm << 5;
6188 }
6189
6190 static void
6191 do_cmp (void)
6192 {
6193 inst.instruction |= inst.operands[0].reg << 16;
6194 encode_arm_shifter_operand (1);
6195 }
6196
6197 /* Transfer between coprocessor and ARM registers.
6198 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6199 MRC2
6200 MCR{cond}
6201 MCR2
6202
6203 No special properties. */
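/* For example (illustrative), "mrc p15, 0, r0, c1, c0, 0" maps its six
   operands straight into the fields set below: coprocessor number to
   bits 11:8, opcode_1 to 23:21, Rd to 15:12, CRn to 19:16, CRm to 3:0
   and opcode_2 to 7:5.  */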
6204
6205 static void
6206 do_co_reg (void)
6207 {
6208 inst.instruction |= inst.operands[0].reg << 8;
6209 inst.instruction |= inst.operands[1].imm << 21;
6210 inst.instruction |= inst.operands[2].reg << 12;
6211 inst.instruction |= inst.operands[3].reg << 16;
6212 inst.instruction |= inst.operands[4].reg;
6213 inst.instruction |= inst.operands[5].imm << 5;
6214 }
6215
6216 /* Transfer between coprocessor register and pair of ARM registers.
6217 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6218 MCRR2
6219 MRRC{cond}
6220 MRRC2
6221
6222 Two XScale instructions are special cases of these:
6223
6224 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6225 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6226
6227    Result unpredictable if Rd or Rn is R15.  */
6228
6229 static void
6230 do_co_reg2c (void)
6231 {
6232 inst.instruction |= inst.operands[0].reg << 8;
6233 inst.instruction |= inst.operands[1].imm << 4;
6234 inst.instruction |= inst.operands[2].reg << 12;
6235 inst.instruction |= inst.operands[3].reg << 16;
6236 inst.instruction |= inst.operands[4].reg;
6237 }
6238
6239 static void
6240 do_cpsi (void)
6241 {
6242 inst.instruction |= inst.operands[0].imm << 6;
6243 inst.instruction |= inst.operands[1].imm;
6244 }
6245
6246 static void
6247 do_dbg (void)
6248 {
6249 inst.instruction |= inst.operands[0].imm;
6250 }
6251
6252 static void
6253 do_it (void)
6254 {
6255 /* There is no IT instruction in ARM mode. We
6256 process it but do not generate code for it. */
6257 inst.size = 0;
6258 }
6259
6260 static void
6261 do_ldmstm (void)
6262 {
6263 int base_reg = inst.operands[0].reg;
6264 int range = inst.operands[1].imm;
6265
6266 inst.instruction |= base_reg << 16;
6267 inst.instruction |= range;
6268
6269 if (inst.operands[1].writeback)
6270 inst.instruction |= LDM_TYPE_2_OR_3;
6271
6272 if (inst.operands[0].writeback)
6273 {
6274 inst.instruction |= WRITE_BACK;
6275 /* Check for unpredictable uses of writeback. */
6276 if (inst.instruction & LOAD_BIT)
6277 {
6278 /* Not allowed in LDM type 2. */
6279 if ((inst.instruction & LDM_TYPE_2_OR_3)
6280 && ((range & (1 << REG_PC)) == 0))
6281 as_warn (_("writeback of base register is UNPREDICTABLE"));
6282 /* Only allowed if base reg not in list for other types. */
6283 else if (range & (1 << base_reg))
6284 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6285 }
6286 else /* STM. */
6287 {
6288 /* Not allowed for type 2. */
6289 if (inst.instruction & LDM_TYPE_2_OR_3)
6290 as_warn (_("writeback of base register is UNPREDICTABLE"));
6291 /* Only allowed if base reg not in list, or first in list. */
6292 else if ((range & (1 << base_reg))
6293 && (range & ((1 << base_reg) - 1)))
6294 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6295 }
6296 }
6297 }
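/* Two illustrative cases for the writeback checks above (register
   choices are invented): "ldmia r0!, {r0, r1}" draws the warning about
   the base register appearing in the register list, and
   "stmia r2!, {r1, r2}" the "lowest reg in the list" warning, since r1
   sits below the base register r2.  */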
6298
6299 /* ARMv5TE load-consecutive (argument parse)
6300 Mode is like LDRH.
6301
6302 LDRccD R, mode
6303 STRccD R, mode. */
6304
6305 static void
6306 do_ldrd (void)
6307 {
6308 constraint (inst.operands[0].reg % 2 != 0,
6309 _("first destination register must be even"));
6310 constraint (inst.operands[1].present
6311 && inst.operands[1].reg != inst.operands[0].reg + 1,
6312 _("can only load two consecutive registers"));
6313 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6314 constraint (!inst.operands[2].isreg, _("'[' expected"));
6315
6316 if (!inst.operands[1].present)
6317 inst.operands[1].reg = inst.operands[0].reg + 1;
6318
6319 if (inst.instruction & LOAD_BIT)
6320 {
6321 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6322 register and the first register written; we have to diagnose
6323 overlap between the base and the second register written here. */
6324
6325 if (inst.operands[2].reg == inst.operands[1].reg
6326 && (inst.operands[2].writeback || inst.operands[2].postind))
6327 as_warn (_("base register written back, and overlaps "
6328 "second destination register"));
6329
6330 /* For an index-register load, the index register must not overlap the
6331 destination (even if not write-back). */
6332 else if (inst.operands[2].immisreg
6333 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6334 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6335 as_warn (_("index register overlaps destination register"));
6336 }
6337
6338 inst.instruction |= inst.operands[0].reg << 12;
6339 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6340 }
6341
6342 static void
6343 do_ldrex (void)
6344 {
6345 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6346 || inst.operands[1].postind || inst.operands[1].writeback
6347 || inst.operands[1].immisreg || inst.operands[1].shifted
6348 || inst.operands[1].negative
6349 /* This can arise if the programmer has written
6350 strex rN, rM, foo
6351 or if they have mistakenly used a register name as the last
6352 operand, e.g.:
6353 strex rN, rM, rX
6354 It is very difficult to distinguish between these two cases
6355 because "rX" might actually be a label, i.e. the register
6356 name has been occluded by a symbol of the same name. So we
6357 just generate a general 'bad addressing mode' type error
6358 message and leave it up to the programmer to discover the
6359 true cause and fix their mistake. */
6360 || (inst.operands[1].reg == REG_PC),
6361 BAD_ADDR_MODE);
6362
6363 constraint (inst.reloc.exp.X_op != O_constant
6364 || inst.reloc.exp.X_add_number != 0,
6365 _("offset must be zero in ARM encoding"));
6366
6367 inst.instruction |= inst.operands[0].reg << 12;
6368 inst.instruction |= inst.operands[1].reg << 16;
6369 inst.reloc.type = BFD_RELOC_UNUSED;
6370 }
6371
6372 static void
6373 do_ldrexd (void)
6374 {
6375 constraint (inst.operands[0].reg % 2 != 0,
6376 _("even register required"));
6377 constraint (inst.operands[1].present
6378 && inst.operands[1].reg != inst.operands[0].reg + 1,
6379 _("can only load two consecutive registers"));
6380 /* If op 1 were present and equal to PC, this function wouldn't
6381 have been called in the first place. */
6382 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6383
6384 inst.instruction |= inst.operands[0].reg << 12;
6385 inst.instruction |= inst.operands[2].reg << 16;
6386 }
6387
6388 static void
6389 do_ldst (void)
6390 {
6391 inst.instruction |= inst.operands[0].reg << 12;
6392 if (!inst.operands[1].isreg)
6393 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6394 return;
6395 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6396 }
6397
6398 static void
6399 do_ldstt (void)
6400 {
6401 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6402 reject [Rn,...]. */
6403 if (inst.operands[1].preind)
6404 {
6405 constraint (inst.reloc.exp.X_op != O_constant ||
6406 inst.reloc.exp.X_add_number != 0,
6407 _("this instruction requires a post-indexed address"));
6408
6409 inst.operands[1].preind = 0;
6410 inst.operands[1].postind = 1;
6411 inst.operands[1].writeback = 1;
6412 }
6413 inst.instruction |= inst.operands[0].reg << 12;
6414 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6415 }
6416
6417 /* Halfword and signed-byte load/store operations. */
6418
6419 static void
6420 do_ldstv4 (void)
6421 {
6422 inst.instruction |= inst.operands[0].reg << 12;
6423 if (!inst.operands[1].isreg)
6424 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6425 return;
6426 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6427 }
6428
6429 static void
6430 do_ldsttv4 (void)
6431 {
6432 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6433 reject [Rn,...]. */
6434 if (inst.operands[1].preind)
6435 {
6436 constraint (inst.reloc.exp.X_op != O_constant ||
6437 inst.reloc.exp.X_add_number != 0,
6438 _("this instruction requires a post-indexed address"));
6439
6440 inst.operands[1].preind = 0;
6441 inst.operands[1].postind = 1;
6442 inst.operands[1].writeback = 1;
6443 }
6444 inst.instruction |= inst.operands[0].reg << 12;
6445 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6446 }
6447
6448 /* Co-processor register load/store.
6449 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6450 static void
6451 do_lstc (void)
6452 {
6453 inst.instruction |= inst.operands[0].reg << 8;
6454 inst.instruction |= inst.operands[1].reg << 12;
6455 encode_arm_cp_address (2, TRUE, TRUE, 0);
6456 }
6457
6458 static void
6459 do_mlas (void)
6460 {
6461 /* This restriction does not apply to mls (nor to mla in v6, but
6462 that's hard to detect at present). */
6463 if (inst.operands[0].reg == inst.operands[1].reg
6464 && !(inst.instruction & 0x00400000))
6465 as_tsktsk (_("rd and rm should be different in mla"));
6466
6467 inst.instruction |= inst.operands[0].reg << 16;
6468 inst.instruction |= inst.operands[1].reg;
6469 inst.instruction |= inst.operands[2].reg << 8;
6470 inst.instruction |= inst.operands[3].reg << 12;
6471
6472 }
6473
6474 static void
6475 do_mov (void)
6476 {
6477 inst.instruction |= inst.operands[0].reg << 12;
6478 encode_arm_shifter_operand (1);
6479 }
6480
6481 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6482 static void
6483 do_mov16 (void)
6484 {
6485 bfd_vma imm;
6486 bfd_boolean top;
6487
6488 top = (inst.instruction & 0x00400000) != 0;
6489 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
6490 _(":lower16: not allowed this instruction"));
6491 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
6492 _(":upper16: not allowed instruction"));
6493 inst.instruction |= inst.operands[0].reg << 12;
6494 if (inst.reloc.type == BFD_RELOC_UNUSED)
6495 {
6496 imm = inst.reloc.exp.X_add_number;
6497 /* The value is in two pieces: 0:11, 16:19. */
6498 inst.instruction |= (imm & 0x00000fff);
6499 inst.instruction |= (imm & 0x0000f000) << 4;
6500 }
6501 }
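/* Worked example of the split in do_mov16 above (immediate chosen for
   illustration): with "movw r0, #0x1234" and no relocation pending,
   0x234 goes straight into bits 0-11 and the remaining nibble 0x1 is
   shifted up into bits 16-19.  */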
6502
6503 static void
6504 do_mrs (void)
6505 {
6506 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6507 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6508 != (PSR_c|PSR_f),
6509 _("'CPSR' or 'SPSR' expected"));
6510 inst.instruction |= inst.operands[0].reg << 12;
6511 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6512 }
6513
6514 /* Two possible forms:
6515 "{C|S}PSR_<field>, Rm",
6516 "{C|S}PSR_f, #expression". */
6517
6518 static void
6519 do_msr (void)
6520 {
6521 inst.instruction |= inst.operands[0].imm;
6522 if (inst.operands[1].isreg)
6523 inst.instruction |= inst.operands[1].reg;
6524 else
6525 {
6526 inst.instruction |= INST_IMMEDIATE;
6527 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6528 inst.reloc.pc_rel = 0;
6529 }
6530 }
6531
6532 static void
6533 do_mul (void)
6534 {
6535 if (!inst.operands[2].present)
6536 inst.operands[2].reg = inst.operands[0].reg;
6537 inst.instruction |= inst.operands[0].reg << 16;
6538 inst.instruction |= inst.operands[1].reg;
6539 inst.instruction |= inst.operands[2].reg << 8;
6540
6541 if (inst.operands[0].reg == inst.operands[1].reg)
6542 as_tsktsk (_("rd and rm should be different in mul"));
6543 }
6544
6545 /* Long Multiply Parser
6546 UMULL RdLo, RdHi, Rm, Rs
6547 SMULL RdLo, RdHi, Rm, Rs
6548 UMLAL RdLo, RdHi, Rm, Rs
6549 SMLAL RdLo, RdHi, Rm, Rs. */
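/* Field placement used by do_mull below (registers are purely
   illustrative): for "umull r1, r2, r3, r4", RdLo lands in bits 12-15,
   RdHi in bits 16-19, Rm in bits 0-3 and Rs in bits 8-11; the code then
   warns unless RdLo, RdHi and Rm are all different.  */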
6550
6551 static void
6552 do_mull (void)
6553 {
6554 inst.instruction |= inst.operands[0].reg << 12;
6555 inst.instruction |= inst.operands[1].reg << 16;
6556 inst.instruction |= inst.operands[2].reg;
6557 inst.instruction |= inst.operands[3].reg << 8;
6558
6559 /* rdhi, rdlo and rm must all be different. */
6560 if (inst.operands[0].reg == inst.operands[1].reg
6561 || inst.operands[0].reg == inst.operands[2].reg
6562 || inst.operands[1].reg == inst.operands[2].reg)
6563 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6564 }
6565
6566 static void
6567 do_nop (void)
6568 {
6569 if (inst.operands[0].present)
6570 {
6571 /* Architectural NOP hints are CPSR sets with no bits selected. */
6572 inst.instruction &= 0xf0000000;
6573 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6574 }
6575 }
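/* For example (hint value chosen for illustration): a hint operand of 1
   under the always condition gives 0xe320f001, which is the ARM
   encoding of the YIELD hint, since the operand is simply added to the
   MSR-with-no-fields pattern above.  */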
6576
6577 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6578 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6579 Condition defaults to COND_ALWAYS.
6580 Error if Rd, Rn or Rm are R15. */
6581
6582 static void
6583 do_pkhbt (void)
6584 {
6585 inst.instruction |= inst.operands[0].reg << 12;
6586 inst.instruction |= inst.operands[1].reg << 16;
6587 inst.instruction |= inst.operands[2].reg;
6588 if (inst.operands[3].present)
6589 encode_arm_shift (3);
6590 }
6591
6592 /* ARM V6 PKHTB (Argument Parse). */
6593
6594 static void
6595 do_pkhtb (void)
6596 {
6597 if (!inst.operands[3].present)
6598 {
6599 /* If the shift specifier is omitted, turn the instruction
6600 into pkhbt rd, rm, rn. */
6601 inst.instruction &= 0xfff00010;
6602 inst.instruction |= inst.operands[0].reg << 12;
6603 inst.instruction |= inst.operands[1].reg;
6604 inst.instruction |= inst.operands[2].reg << 16;
6605 }
6606 else
6607 {
6608 inst.instruction |= inst.operands[0].reg << 12;
6609 inst.instruction |= inst.operands[1].reg << 16;
6610 inst.instruction |= inst.operands[2].reg;
6611 encode_arm_shift (3);
6612 }
6613 }
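/* Illustrative example (register choice is arbitrary):
   "pkhtb r0, r1, r2" with no shift is rewritten above into the
   equivalent "pkhbt r0, r2, r1", while "pkhtb r0, r1, r2, asr #8" keeps
   the PKHTB form and encodes the shift via encode_arm_shift.  */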
6614
6615 /* ARMv5TE: Preload-Cache
6616
6617 PLD <addr_mode>
6618
6619 Syntactically, like LDR with B=1, W=0, L=1. */
6620
6621 static void
6622 do_pld (void)
6623 {
6624 constraint (!inst.operands[0].isreg,
6625 _("'[' expected after PLD mnemonic"));
6626 constraint (inst.operands[0].postind,
6627 _("post-indexed expression used in preload instruction"));
6628 constraint (inst.operands[0].writeback,
6629 _("writeback used in preload instruction"));
6630 constraint (!inst.operands[0].preind,
6631 _("unindexed addressing used in preload instruction"));
6632 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6633 }
6634
6635 /* ARMv7: PLI <addr_mode> */
6636 static void
6637 do_pli (void)
6638 {
6639 constraint (!inst.operands[0].isreg,
6640 _("'[' expected after PLI mnemonic"));
6641 constraint (inst.operands[0].postind,
6642 _("post-indexed expression used in preload instruction"));
6643 constraint (inst.operands[0].writeback,
6644 _("writeback used in preload instruction"));
6645 constraint (!inst.operands[0].preind,
6646 _("unindexed addressing used in preload instruction"));
6647 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6648 inst.instruction &= ~PRE_INDEX;
6649 }
6650
6651 static void
6652 do_push_pop (void)
6653 {
6654 inst.operands[1] = inst.operands[0];
6655 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6656 inst.operands[0].isreg = 1;
6657 inst.operands[0].writeback = 1;
6658 inst.operands[0].reg = REG_SP;
6659 do_ldmstm ();
6660 }
6661
6662 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6663 word at the specified address and the following word
6664 respectively.
6665 Unconditionally executed.
6666 Error if Rn is R15. */
6667
6668 static void
6669 do_rfe (void)
6670 {
6671 inst.instruction |= inst.operands[0].reg << 16;
6672 if (inst.operands[0].writeback)
6673 inst.instruction |= WRITE_BACK;
6674 }
6675
6676 /* ARM V6 ssat (argument parse). */
6677
6678 static void
6679 do_ssat (void)
6680 {
6681 inst.instruction |= inst.operands[0].reg << 12;
6682 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6683 inst.instruction |= inst.operands[2].reg;
6684
6685 if (inst.operands[3].present)
6686 encode_arm_shift (3);
6687 }
6688
6689 /* ARM V6 usat (argument parse). */
6690
6691 static void
6692 do_usat (void)
6693 {
6694 inst.instruction |= inst.operands[0].reg << 12;
6695 inst.instruction |= inst.operands[1].imm << 16;
6696 inst.instruction |= inst.operands[2].reg;
6697
6698 if (inst.operands[3].present)
6699 encode_arm_shift (3);
6700 }
6701
6702 /* ARM V6 ssat16 (argument parse). */
6703
6704 static void
6705 do_ssat16 (void)
6706 {
6707 inst.instruction |= inst.operands[0].reg << 12;
6708 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6709 inst.instruction |= inst.operands[2].reg;
6710 }
6711
6712 static void
6713 do_usat16 (void)
6714 {
6715 inst.instruction |= inst.operands[0].reg << 12;
6716 inst.instruction |= inst.operands[1].imm << 16;
6717 inst.instruction |= inst.operands[2].reg;
6718 }
6719
6720 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6721 preserving the other bits.
6722
6723 setend <endian_specifier>, where <endian_specifier> is either
6724 BE or LE. */
6725
6726 static void
6727 do_setend (void)
6728 {
6729 if (inst.operands[0].imm)
6730 inst.instruction |= 0x200;
6731 }
6732
6733 static void
6734 do_shift (void)
6735 {
6736 unsigned int Rm = (inst.operands[1].present
6737 ? inst.operands[1].reg
6738 : inst.operands[0].reg);
6739
6740 inst.instruction |= inst.operands[0].reg << 12;
6741 inst.instruction |= Rm;
6742 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6743 {
6744 inst.instruction |= inst.operands[2].reg << 8;
6745 inst.instruction |= SHIFT_BY_REG;
6746 }
6747 else
6748 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6749 }
6750
6751 static void
6752 do_smc (void)
6753 {
6754 inst.reloc.type = BFD_RELOC_ARM_SMC;
6755 inst.reloc.pc_rel = 0;
6756 }
6757
6758 static void
6759 do_swi (void)
6760 {
6761 inst.reloc.type = BFD_RELOC_ARM_SWI;
6762 inst.reloc.pc_rel = 0;
6763 }
6764
6765 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6766 SMLAxy{cond} Rd,Rm,Rs,Rn
6767 SMLAWy{cond} Rd,Rm,Rs,Rn
6768 Error if any register is R15. */
6769
6770 static void
6771 do_smla (void)
6772 {
6773 inst.instruction |= inst.operands[0].reg << 16;
6774 inst.instruction |= inst.operands[1].reg;
6775 inst.instruction |= inst.operands[2].reg << 8;
6776 inst.instruction |= inst.operands[3].reg << 12;
6777 }
6778
6779 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6780 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6781 Error if any register is R15.
6782 Warning if Rdlo == Rdhi. */
6783
6784 static void
6785 do_smlal (void)
6786 {
6787 inst.instruction |= inst.operands[0].reg << 12;
6788 inst.instruction |= inst.operands[1].reg << 16;
6789 inst.instruction |= inst.operands[2].reg;
6790 inst.instruction |= inst.operands[3].reg << 8;
6791
6792 if (inst.operands[0].reg == inst.operands[1].reg)
6793 as_tsktsk (_("rdhi and rdlo must be different"));
6794 }
6795
6796 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6797 SMULxy{cond} Rd,Rm,Rs
6798 Error if any register is R15. */
6799
6800 static void
6801 do_smul (void)
6802 {
6803 inst.instruction |= inst.operands[0].reg << 16;
6804 inst.instruction |= inst.operands[1].reg;
6805 inst.instruction |= inst.operands[2].reg << 8;
6806 }
6807
6808 /* ARM V6 srs (argument parse). */
6809
6810 static void
6811 do_srs (void)
6812 {
6813 inst.instruction |= inst.operands[0].imm;
6814 if (inst.operands[0].writeback)
6815 inst.instruction |= WRITE_BACK;
6816 }
6817
6818 /* ARM V6 strex (argument parse). */
6819
6820 static void
6821 do_strex (void)
6822 {
6823 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6824 || inst.operands[2].postind || inst.operands[2].writeback
6825 || inst.operands[2].immisreg || inst.operands[2].shifted
6826 || inst.operands[2].negative
6827 /* See comment in do_ldrex(). */
6828 || (inst.operands[2].reg == REG_PC),
6829 BAD_ADDR_MODE);
6830
6831 constraint (inst.operands[0].reg == inst.operands[1].reg
6832 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6833
6834 constraint (inst.reloc.exp.X_op != O_constant
6835 || inst.reloc.exp.X_add_number != 0,
6836 _("offset must be zero in ARM encoding"));
6837
6838 inst.instruction |= inst.operands[0].reg << 12;
6839 inst.instruction |= inst.operands[1].reg;
6840 inst.instruction |= inst.operands[2].reg << 16;
6841 inst.reloc.type = BFD_RELOC_UNUSED;
6842 }
6843
6844 static void
6845 do_strexd (void)
6846 {
6847 constraint (inst.operands[1].reg % 2 != 0,
6848 _("even register required"));
6849 constraint (inst.operands[2].present
6850 && inst.operands[2].reg != inst.operands[1].reg + 1,
6851 _("can only store two consecutive registers"));
6852 /* If op 2 were present and equal to PC, this function wouldn't
6853 have been called in the first place. */
6854 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6855
6856 constraint (inst.operands[0].reg == inst.operands[1].reg
6857 || inst.operands[0].reg == inst.operands[1].reg + 1
6858 || inst.operands[0].reg == inst.operands[3].reg,
6859 BAD_OVERLAP);
6860
6861 inst.instruction |= inst.operands[0].reg << 12;
6862 inst.instruction |= inst.operands[1].reg;
6863 inst.instruction |= inst.operands[3].reg << 16;
6864 }
6865
6866 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6867 extends it to 32-bits, and adds the result to a value in another
6868 register. You can specify a rotation by 0, 8, 16, or 24 bits
6869 before extracting the 16-bit value.
6870 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6871 Condition defaults to COND_ALWAYS.
6872 Error if any register uses R15. */
6873
6874 static void
6875 do_sxtah (void)
6876 {
6877 inst.instruction |= inst.operands[0].reg << 12;
6878 inst.instruction |= inst.operands[1].reg << 16;
6879 inst.instruction |= inst.operands[2].reg;
6880 inst.instruction |= inst.operands[3].imm << 10;
6881 }
6882
6883 /* ARM V6 SXTH.
6884
6885 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6886 Condition defaults to COND_ALWAYS.
6887 Error if any register uses R15. */
6888
6889 static void
6890 do_sxth (void)
6891 {
6892 inst.instruction |= inst.operands[0].reg << 12;
6893 inst.instruction |= inst.operands[1].reg;
6894 inst.instruction |= inst.operands[2].imm << 10;
6895 }
6896 \f
6897 /* VFP instructions. In a logical order: SP variant first, monad
6898 before dyad, arithmetic then move then load/store. */
6899
6900 static void
6901 do_vfp_sp_monadic (void)
6902 {
6903 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6904 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6905 }
6906
6907 static void
6908 do_vfp_sp_dyadic (void)
6909 {
6910 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6911 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6912 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6913 }
6914
6915 static void
6916 do_vfp_sp_compare_z (void)
6917 {
6918 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6919 }
6920
6921 static void
6922 do_vfp_dp_sp_cvt (void)
6923 {
6924 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6925 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6926 }
6927
6928 static void
6929 do_vfp_sp_dp_cvt (void)
6930 {
6931 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6932 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6933 }
6934
6935 static void
6936 do_vfp_reg_from_sp (void)
6937 {
6938 inst.instruction |= inst.operands[0].reg << 12;
6939 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6940 }
6941
6942 static void
6943 do_vfp_reg2_from_sp2 (void)
6944 {
6945 constraint (inst.operands[2].imm != 2,
6946 _("only two consecutive VFP SP registers allowed here"));
6947 inst.instruction |= inst.operands[0].reg << 12;
6948 inst.instruction |= inst.operands[1].reg << 16;
6949 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6950 }
6951
6952 static void
6953 do_vfp_sp_from_reg (void)
6954 {
6955 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6956 inst.instruction |= inst.operands[1].reg << 12;
6957 }
6958
6959 static void
6960 do_vfp_sp2_from_reg2 (void)
6961 {
6962 constraint (inst.operands[0].imm != 2,
6963 _("only two consecutive VFP SP registers allowed here"));
6964 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6965 inst.instruction |= inst.operands[1].reg << 12;
6966 inst.instruction |= inst.operands[2].reg << 16;
6967 }
6968
6969 static void
6970 do_vfp_sp_ldst (void)
6971 {
6972 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6973 encode_arm_cp_address (1, FALSE, TRUE, 0);
6974 }
6975
6976 static void
6977 do_vfp_dp_ldst (void)
6978 {
6979 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6980 encode_arm_cp_address (1, FALSE, TRUE, 0);
6981 }
6982
6983
6984 static void
6985 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6986 {
6987 if (inst.operands[0].writeback)
6988 inst.instruction |= WRITE_BACK;
6989 else
6990 constraint (ldstm_type != VFP_LDSTMIA,
6991 _("this addressing mode requires base-register writeback"));
6992 inst.instruction |= inst.operands[0].reg << 16;
6993 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6994 inst.instruction |= inst.operands[1].imm;
6995 }
6996
6997 static void
6998 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6999 {
7000 int count;
7001
7002 if (inst.operands[0].writeback)
7003 inst.instruction |= WRITE_BACK;
7004 else
7005 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7006 _("this addressing mode requires base-register writeback"));
7007
7008 inst.instruction |= inst.operands[0].reg << 16;
7009 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7010
7011 count = inst.operands[1].imm << 1;
7012 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7013 count += 1;
7014
7015 inst.instruction |= count;
7016 }
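/* The transfer count encoded above is in words: each D register
   contributes two, and the X forms (FLDMX/FSTMX) add one more.  For
   example (purely illustrative), a three-register transfer gives a
   count of 6, or 7 for the X variants.  */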
7017
7018 static void
7019 do_vfp_sp_ldstmia (void)
7020 {
7021 vfp_sp_ldstm (VFP_LDSTMIA);
7022 }
7023
7024 static void
7025 do_vfp_sp_ldstmdb (void)
7026 {
7027 vfp_sp_ldstm (VFP_LDSTMDB);
7028 }
7029
7030 static void
7031 do_vfp_dp_ldstmia (void)
7032 {
7033 vfp_dp_ldstm (VFP_LDSTMIA);
7034 }
7035
7036 static void
7037 do_vfp_dp_ldstmdb (void)
7038 {
7039 vfp_dp_ldstm (VFP_LDSTMDB);
7040 }
7041
7042 static void
7043 do_vfp_xp_ldstmia (void)
7044 {
7045 vfp_dp_ldstm (VFP_LDSTMIAX);
7046 }
7047
7048 static void
7049 do_vfp_xp_ldstmdb (void)
7050 {
7051 vfp_dp_ldstm (VFP_LDSTMDBX);
7052 }
7053
7054 static void
7055 do_vfp_dp_rd_rm (void)
7056 {
7057 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7058 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7059 }
7060
7061 static void
7062 do_vfp_dp_rn_rd (void)
7063 {
7064 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7065 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7066 }
7067
7068 static void
7069 do_vfp_dp_rd_rn (void)
7070 {
7071 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7072 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7073 }
7074
7075 static void
7076 do_vfp_dp_rd_rn_rm (void)
7077 {
7078 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7079 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7080 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7081 }
7082
7083 static void
7084 do_vfp_dp_rd (void)
7085 {
7086 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7087 }
7088
7089 static void
7090 do_vfp_dp_rm_rd_rn (void)
7091 {
7092 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7093 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7094 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7095 }
7096
7097 /* VFPv3 instructions. */
7098 static void
7099 do_vfp_sp_const (void)
7100 {
7101 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7102 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7103 inst.instruction |= (inst.operands[1].imm >> 4);
7104 }
7105
7106 static void
7107 do_vfp_dp_const (void)
7108 {
7109 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7110 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7111 inst.instruction |= (inst.operands[1].imm >> 4);
7112 }
7113
7114 static void
7115 vfp_conv (int srcsize)
7116 {
7117 unsigned immbits = srcsize - inst.operands[1].imm;
7118 inst.instruction |= (immbits & 1) << 5;
7119 inst.instruction |= (immbits >> 1);
7120 }
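/* Illustrative arithmetic for vfp_conv above: with srcsize 16 and an
   operand immediate of 12, immbits is 4; bit 0 of that value goes into
   bit 5 of the opcode and the remaining bits into the low field.  */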
7121
7122 static void
7123 do_vfp_sp_conv_16 (void)
7124 {
7125 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7126 vfp_conv (16);
7127 }
7128
7129 static void
7130 do_vfp_dp_conv_16 (void)
7131 {
7132 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7133 vfp_conv (16);
7134 }
7135
7136 static void
7137 do_vfp_sp_conv_32 (void)
7138 {
7139 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7140 vfp_conv (32);
7141 }
7142
7143 static void
7144 do_vfp_dp_conv_32 (void)
7145 {
7146 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7147 vfp_conv (32);
7148 }
7149
7150 \f
7151 /* FPA instructions. Also in a logical order. */
7152
7153 static void
7154 do_fpa_cmp (void)
7155 {
7156 inst.instruction |= inst.operands[0].reg << 16;
7157 inst.instruction |= inst.operands[1].reg;
7158 }
7159
7160 static void
7161 do_fpa_ldmstm (void)
7162 {
7163 inst.instruction |= inst.operands[0].reg << 12;
7164 switch (inst.operands[1].imm)
7165 {
7166 case 1: inst.instruction |= CP_T_X; break;
7167 case 2: inst.instruction |= CP_T_Y; break;
7168 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7169 case 4: break;
7170 default: abort ();
7171 }
7172
7173 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7174 {
7175 /* The instruction specified "ea" or "fd", so we can only accept
7176 [Rn]{!}. The instruction does not really support stacking or
7177 unstacking, so we have to emulate these by setting appropriate
7178 bits and offsets. */
7179 constraint (inst.reloc.exp.X_op != O_constant
7180 || inst.reloc.exp.X_add_number != 0,
7181 _("this instruction does not support indexing"));
7182
7183 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7184 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7185
7186 if (!(inst.instruction & INDEX_UP))
7187 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7188
7189 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7190 {
7191 inst.operands[2].preind = 0;
7192 inst.operands[2].postind = 1;
7193 }
7194 }
7195
7196 encode_arm_cp_address (2, TRUE, TRUE, 0);
7197 }
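/* A sketch of the emulation above: the "ea"/"fd" forms only accept
   [Rn]{!}, so the code synthesises an offset of 12 bytes per
   transferred register (48 bytes for a four-register transfer, to give
   an illustrative figure) and negates it when the INDEX_UP bit is
   clear, making the plain base address behave like a stack access.  */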
7198 \f
7199 /* iWMMXt instructions: strictly in alphabetical order. */
7200
7201 static void
7202 do_iwmmxt_tandorc (void)
7203 {
7204 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7205 }
7206
7207 static void
7208 do_iwmmxt_textrc (void)
7209 {
7210 inst.instruction |= inst.operands[0].reg << 12;
7211 inst.instruction |= inst.operands[1].imm;
7212 }
7213
7214 static void
7215 do_iwmmxt_textrm (void)
7216 {
7217 inst.instruction |= inst.operands[0].reg << 12;
7218 inst.instruction |= inst.operands[1].reg << 16;
7219 inst.instruction |= inst.operands[2].imm;
7220 }
7221
7222 static void
7223 do_iwmmxt_tinsr (void)
7224 {
7225 inst.instruction |= inst.operands[0].reg << 16;
7226 inst.instruction |= inst.operands[1].reg << 12;
7227 inst.instruction |= inst.operands[2].imm;
7228 }
7229
7230 static void
7231 do_iwmmxt_tmia (void)
7232 {
7233 inst.instruction |= inst.operands[0].reg << 5;
7234 inst.instruction |= inst.operands[1].reg;
7235 inst.instruction |= inst.operands[2].reg << 12;
7236 }
7237
7238 static void
7239 do_iwmmxt_waligni (void)
7240 {
7241 inst.instruction |= inst.operands[0].reg << 12;
7242 inst.instruction |= inst.operands[1].reg << 16;
7243 inst.instruction |= inst.operands[2].reg;
7244 inst.instruction |= inst.operands[3].imm << 20;
7245 }
7246
7247 static void
7248 do_iwmmxt_wmov (void)
7249 {
7250 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7251 inst.instruction |= inst.operands[0].reg << 12;
7252 inst.instruction |= inst.operands[1].reg << 16;
7253 inst.instruction |= inst.operands[1].reg;
7254 }
7255
7256 static void
7257 do_iwmmxt_wldstbh (void)
7258 {
7259 int reloc;
7260 inst.instruction |= inst.operands[0].reg << 12;
7261 if (thumb_mode)
7262 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7263 else
7264 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7265 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7266 }
7267
7268 static void
7269 do_iwmmxt_wldstw (void)
7270 {
7271 /* RIWR_RIWC clears .isreg for a control register. */
7272 if (!inst.operands[0].isreg)
7273 {
7274 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7275 inst.instruction |= 0xf0000000;
7276 }
7277
7278 inst.instruction |= inst.operands[0].reg << 12;
7279 encode_arm_cp_address (1, TRUE, TRUE, 0);
7280 }
7281
7282 static void
7283 do_iwmmxt_wldstd (void)
7284 {
7285 inst.instruction |= inst.operands[0].reg << 12;
7286 encode_arm_cp_address (1, TRUE, FALSE, 0);
7287 }
7288
7289 static void
7290 do_iwmmxt_wshufh (void)
7291 {
7292 inst.instruction |= inst.operands[0].reg << 12;
7293 inst.instruction |= inst.operands[1].reg << 16;
7294 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7295 inst.instruction |= (inst.operands[2].imm & 0x0f);
7296 }
7297
7298 static void
7299 do_iwmmxt_wzero (void)
7300 {
7301 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7302 inst.instruction |= inst.operands[0].reg;
7303 inst.instruction |= inst.operands[0].reg << 12;
7304 inst.instruction |= inst.operands[0].reg << 16;
7305 }
7306 \f
7307 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7308 operations first, then control, shift, and load/store. */
7309
7310 /* Insns like "foo X,Y,Z". */
7311
7312 static void
7313 do_mav_triple (void)
7314 {
7315 inst.instruction |= inst.operands[0].reg << 16;
7316 inst.instruction |= inst.operands[1].reg;
7317 inst.instruction |= inst.operands[2].reg << 12;
7318 }
7319
7320 /* Insns like "foo W,X,Y,Z".
7321 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7322
7323 static void
7324 do_mav_quad (void)
7325 {
7326 inst.instruction |= inst.operands[0].reg << 5;
7327 inst.instruction |= inst.operands[1].reg << 12;
7328 inst.instruction |= inst.operands[2].reg << 16;
7329 inst.instruction |= inst.operands[3].reg;
7330 }
7331
7332 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7333 static void
7334 do_mav_dspsc (void)
7335 {
7336 inst.instruction |= inst.operands[1].reg << 12;
7337 }
7338
7339 /* Maverick shift immediate instructions.
7340 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7341 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7342
7343 static void
7344 do_mav_shift (void)
7345 {
7346 int imm = inst.operands[2].imm;
7347
7348 inst.instruction |= inst.operands[0].reg << 12;
7349 inst.instruction |= inst.operands[1].reg << 16;
7350
7351 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7352 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7353 Bit 4 should be 0. */
7354 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7355
7356 inst.instruction |= imm;
7357 }
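/* Worked example of the immediate split above (value chosen for
   illustration): a shift amount of 0x25 (binary 010 0101) becomes
   (0x5) | (0x20 << 1) = 0x45, so bits 0-3 hold 0101, bit 4 is clear and
   bits 5-7 hold 010.  */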
7358 \f
7359 /* XScale instructions. Also sorted arithmetic before move. */
7360
7361 /* Xscale multiply-accumulate (argument parse)
7362 MIAcc acc0,Rm,Rs
7363 MIAPHcc acc0,Rm,Rs
7364 MIAxycc acc0,Rm,Rs. */
7365
7366 static void
7367 do_xsc_mia (void)
7368 {
7369 inst.instruction |= inst.operands[1].reg;
7370 inst.instruction |= inst.operands[2].reg << 12;
7371 }
7372
7373 /* Xscale move-accumulator-register (argument parse)
7374
7375 MARcc acc0,RdLo,RdHi. */
7376
7377 static void
7378 do_xsc_mar (void)
7379 {
7380 inst.instruction |= inst.operands[1].reg << 12;
7381 inst.instruction |= inst.operands[2].reg << 16;
7382 }
7383
7384 /* Xscale move-register-accumulator (argument parse)
7385
7386 MRAcc RdLo,RdHi,acc0. */
7387
7388 static void
7389 do_xsc_mra (void)
7390 {
7391 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7392 inst.instruction |= inst.operands[0].reg << 12;
7393 inst.instruction |= inst.operands[1].reg << 16;
7394 }
7395 \f
7396 /* Encoding functions relevant only to Thumb. */
7397
7398 /* inst.operands[i] is a shifted-register operand; encode
7399 it into inst.instruction in the format used by Thumb32. */
7400
7401 static void
7402 encode_thumb32_shifted_operand (int i)
7403 {
7404 unsigned int value = inst.reloc.exp.X_add_number;
7405 unsigned int shift = inst.operands[i].shift_kind;
7406
7407 constraint (inst.operands[i].immisreg,
7408 _("shift by register not allowed in thumb mode"));
7409 inst.instruction |= inst.operands[i].reg;
7410 if (shift == SHIFT_RRX)
7411 inst.instruction |= SHIFT_ROR << 4;
7412 else
7413 {
7414 constraint (inst.reloc.exp.X_op != O_constant,
7415 _("expression too complex"));
7416
7417 constraint (value > 32
7418 || (value == 32 && (shift == SHIFT_LSL
7419 || shift == SHIFT_ROR)),
7420 _("shift expression is too large"));
7421
7422 if (value == 0)
7423 shift = SHIFT_LSL;
7424 else if (value == 32)
7425 value = 0;
7426
7427 inst.instruction |= shift << 4;
7428 inst.instruction |= (value & 0x1c) << 10;
7429 inst.instruction |= (value & 0x03) << 6;
7430 }
7431 }
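/* For illustration (shift values are examples only): a shift written as
   "lsr #32" is encoded above with an immediate of zero, which Thumb-2
   interprets as a shift by 32, while a shift amount of #0 is
   canonicalised to LSL #0, i.e. no shift at all.  */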
7432
7433
7434 /* inst.operands[i] was set up by parse_address. Encode it into a
7435 Thumb32 format load or store instruction. Reject forms that cannot
7436 be used with such instructions. If is_t is true, reject forms that
7437 cannot be used with a T instruction; if is_d is true, reject forms
7438 that cannot be used with a D instruction. */
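/* For illustration (the operand forms are hypothetical examples):
   "ldr r0, [r1, r2, lsl #2]" takes the register-index branch below,
   "ldr r0, [r1, #4]!" the pre-indexed branch with writeback, and
   "ldr r0, [r1], #4" the post-indexed branch; an "=N" pseudo-address is
   rejected outright by the first constraint.  */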
7439
7440 static void
7441 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7442 {
7443 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7444
7445 constraint (!inst.operands[i].isreg,
7446 _("Instruction does not support =N addresses"));
7447
7448 inst.instruction |= inst.operands[i].reg << 16;
7449 if (inst.operands[i].immisreg)
7450 {
7451 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7452 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7453 constraint (inst.operands[i].negative,
7454 _("Thumb does not support negative register indexing"));
7455 constraint (inst.operands[i].postind,
7456 _("Thumb does not support register post-indexing"));
7457 constraint (inst.operands[i].writeback,
7458 _("Thumb does not support register indexing with writeback"));
7459 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7460 _("Thumb supports only LSL in shifted register indexing"));
7461
7462 inst.instruction |= inst.operands[i].imm;
7463 if (inst.operands[i].shifted)
7464 {
7465 constraint (inst.reloc.exp.X_op != O_constant,
7466 _("expression too complex"));
7467 constraint (inst.reloc.exp.X_add_number < 0
7468 || inst.reloc.exp.X_add_number > 3,
7469 _("shift out of range"));
7470 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7471 }
7472 inst.reloc.type = BFD_RELOC_UNUSED;
7473 }
7474 else if (inst.operands[i].preind)
7475 {
7476 constraint (is_pc && inst.operands[i].writeback,
7477 _("cannot use writeback with PC-relative addressing"));
7478 constraint (is_t && inst.operands[i].writeback,
7479 _("cannot use writeback with this instruction"));
7480
7481 if (is_d)
7482 {
7483 inst.instruction |= 0x01000000;
7484 if (inst.operands[i].writeback)
7485 inst.instruction |= 0x00200000;
7486 }
7487 else
7488 {
7489 inst.instruction |= 0x00000c00;
7490 if (inst.operands[i].writeback)
7491 inst.instruction |= 0x00000100;
7492 }
7493 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7494 }
7495 else if (inst.operands[i].postind)
7496 {
7497 assert (inst.operands[i].writeback);
7498 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7499 constraint (is_t, _("cannot use post-indexing with this instruction"));
7500
7501 if (is_d)
7502 inst.instruction |= 0x00200000;
7503 else
7504 inst.instruction |= 0x00000900;
7505 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7506 }
7507 else /* unindexed - only for coprocessor */
7508 inst.error = _("instruction does not accept unindexed addressing");
7509 }
7510
7511 /* Table of Thumb instructions which exist in both 16- and 32-bit
7512 encodings (the latter only in post-V6T2 cores). The index is the
7513 value used in the insns table below. When there is more than one
7514 possible 16-bit encoding for the instruction, this table always
7515 holds variant (1).
7516 Also contains several pseudo-instructions used during relaxation. */
7517 #define T16_32_TAB \
7518 X(adc, 4140, eb400000), \
7519 X(adcs, 4140, eb500000), \
7520 X(add, 1c00, eb000000), \
7521 X(adds, 1c00, eb100000), \
7522 X(addi, 0000, f1000000), \
7523 X(addis, 0000, f1100000), \
7524 X(add_pc,000f, f20f0000), \
7525 X(add_sp,000d, f10d0000), \
7526 X(adr, 000f, f20f0000), \
7527 X(and, 4000, ea000000), \
7528 X(ands, 4000, ea100000), \
7529 X(asr, 1000, fa40f000), \
7530 X(asrs, 1000, fa50f000), \
7531 X(b, e000, f000b000), \
7532 X(bcond, d000, f0008000), \
7533 X(bic, 4380, ea200000), \
7534 X(bics, 4380, ea300000), \
7535 X(cmn, 42c0, eb100f00), \
7536 X(cmp, 2800, ebb00f00), \
7537 X(cpsie, b660, f3af8400), \
7538 X(cpsid, b670, f3af8600), \
7539 X(cpy, 4600, ea4f0000), \
7540 X(dec_sp,80dd, f1bd0d00), \
7541 X(eor, 4040, ea800000), \
7542 X(eors, 4040, ea900000), \
7543 X(inc_sp,00dd, f10d0d00), \
7544 X(ldmia, c800, e8900000), \
7545 X(ldr, 6800, f8500000), \
7546 X(ldrb, 7800, f8100000), \
7547 X(ldrh, 8800, f8300000), \
7548 X(ldrsb, 5600, f9100000), \
7549 X(ldrsh, 5e00, f9300000), \
7550 X(ldr_pc,4800, f85f0000), \
7551 X(ldr_pc2,4800, f85f0000), \
7552 X(ldr_sp,9800, f85d0000), \
7553 X(lsl, 0000, fa00f000), \
7554 X(lsls, 0000, fa10f000), \
7555 X(lsr, 0800, fa20f000), \
7556 X(lsrs, 0800, fa30f000), \
7557 X(mov, 2000, ea4f0000), \
7558 X(movs, 2000, ea5f0000), \
7559 X(mul, 4340, fb00f000), \
7560 X(muls, 4340, ffffffff), /* no 32b muls */ \
7561 X(mvn, 43c0, ea6f0000), \
7562 X(mvns, 43c0, ea7f0000), \
7563 X(neg, 4240, f1c00000), /* rsb #0 */ \
7564 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7565 X(orr, 4300, ea400000), \
7566 X(orrs, 4300, ea500000), \
7567 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7568 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7569 X(rev, ba00, fa90f080), \
7570 X(rev16, ba40, fa90f090), \
7571 X(revsh, bac0, fa90f0b0), \
7572 X(ror, 41c0, fa60f000), \
7573 X(rors, 41c0, fa70f000), \
7574 X(sbc, 4180, eb600000), \
7575 X(sbcs, 4180, eb700000), \
7576 X(stmia, c000, e8800000), \
7577 X(str, 6000, f8400000), \
7578 X(strb, 7000, f8000000), \
7579 X(strh, 8000, f8200000), \
7580 X(str_sp,9000, f84d0000), \
7581 X(sub, 1e00, eba00000), \
7582 X(subs, 1e00, ebb00000), \
7583 X(subi, 8000, f1a00000), \
7584 X(subis, 8000, f1b00000), \
7585 X(sxtb, b240, fa4ff080), \
7586 X(sxth, b200, fa0ff080), \
7587 X(tst, 4200, ea100f00), \
7588 X(uxtb, b2c0, fa5ff080), \
7589 X(uxth, b280, fa1ff080), \
7590 X(nop, bf00, f3af8000), \
7591 X(yield, bf10, f3af8001), \
7592 X(wfe, bf20, f3af8002), \
7593 X(wfi, bf30, f3af8003), \
7594 X(sev, bf40, f3af8004),
7595
7596 /* To catch errors in encoding functions, the codes are all offset by
7597 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7598 as 16-bit instructions. */
7599 #define X(a,b,c) T_MNEM_##a
7600 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7601 #undef X
7602
7603 #define X(a,b,c) 0x##b
7604 static const unsigned short thumb_op16[] = { T16_32_TAB };
7605 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7606 #undef X
7607
7608 #define X(a,b,c) 0x##c
7609 static const unsigned int thumb_op32[] = { T16_32_TAB };
7610 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7611 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7612 #undef X
7613 #undef T16_32_TAB
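/* A quick check of the offset scheme above (values shown only for
   illustration): T16_32_OFFSET is 0xF7FF, so the first mnemonic,
   T_MNEM_adc, has the value 0xF800; THUMB_OP16 (T_MNEM_adc) then
   indexes slot 0 of thumb_op16 and yields 0x4140.  */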
7614
7615 /* Thumb instruction encoders, in alphabetical order. */
7616
7617 /* ADDW or SUBW. */
7618 static void
7619 do_t_add_sub_w (void)
7620 {
7621 int Rd, Rn;
7622
7623 Rd = inst.operands[0].reg;
7624 Rn = inst.operands[1].reg;
7625
7626 constraint (Rd == 15, _("PC not allowed as destination"));
7627 inst.instruction |= (Rn << 16) | (Rd << 8);
7628 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7629 }
7630
7631 /* Parse an add or subtract instruction. We get here with inst.instruction
7632 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7633
7634 static void
7635 do_t_add_sub (void)
7636 {
7637 int Rd, Rs, Rn;
7638
7639 Rd = inst.operands[0].reg;
7640 Rs = (inst.operands[1].present
7641 ? inst.operands[1].reg /* Rd, Rs, foo */
7642 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7643
7644 if (unified_syntax)
7645 {
7646 bfd_boolean flags;
7647 bfd_boolean narrow;
7648 int opcode;
7649
7650 flags = (inst.instruction == T_MNEM_adds
7651 || inst.instruction == T_MNEM_subs);
7652 if (flags)
7653 narrow = (current_it_mask == 0);
7654 else
7655 narrow = (current_it_mask != 0);
7656 if (!inst.operands[2].isreg)
7657 {
7658 opcode = 0;
7659 if (inst.size_req != 4)
7660 {
7661 int add;
7662
7663 add = (inst.instruction == T_MNEM_add
7664 || inst.instruction == T_MNEM_adds);
7665 /* Attempt to use a narrow opcode, with relaxation if
7666 appropriate. */
7667 if (Rd == REG_SP && Rs == REG_SP && !flags)
7668 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7669 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7670 opcode = T_MNEM_add_sp;
7671 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7672 opcode = T_MNEM_add_pc;
7673 else if (Rd <= 7 && Rs <= 7 && narrow)
7674 {
7675 if (flags)
7676 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7677 else
7678 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7679 }
7680 if (opcode)
7681 {
7682 inst.instruction = THUMB_OP16(opcode);
7683 inst.instruction |= (Rd << 4) | Rs;
7684 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7685 if (inst.size_req != 2)
7686 inst.relax = opcode;
7687 }
7688 else
7689 constraint (inst.size_req == 2, BAD_HIREG);
7690 }
7691 if (inst.size_req == 4
7692 || (inst.size_req != 2 && !opcode))
7693 {
7694 /* ??? Convert large immediates to addw/subw. */
7695 inst.instruction = THUMB_OP32 (inst.instruction);
7696 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7697 inst.instruction |= inst.operands[0].reg << 8;
7698 inst.instruction |= inst.operands[1].reg << 16;
7699 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7700 }
7701 }
7702 else
7703 {
7704 Rn = inst.operands[2].reg;
7705 /* See if we can do this with a 16-bit instruction. */
7706 if (!inst.operands[2].shifted && inst.size_req != 4)
7707 {
7708 if (Rd > 7 || Rs > 7 || Rn > 7)
7709 narrow = FALSE;
7710
7711 if (narrow)
7712 {
7713 inst.instruction = ((inst.instruction == T_MNEM_adds
7714 || inst.instruction == T_MNEM_add)
7715 ? T_OPCODE_ADD_R3
7716 : T_OPCODE_SUB_R3);
7717 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7718 return;
7719 }
7720
7721 if (inst.instruction == T_MNEM_add)
7722 {
7723 if (Rd == Rs)
7724 {
7725 inst.instruction = T_OPCODE_ADD_HI;
7726 inst.instruction |= (Rd & 8) << 4;
7727 inst.instruction |= (Rd & 7);
7728 inst.instruction |= Rn << 3;
7729 return;
7730 }
7731 /* ... because addition is commutative! */
7732 else if (Rd == Rn)
7733 {
7734 inst.instruction = T_OPCODE_ADD_HI;
7735 inst.instruction |= (Rd & 8) << 4;
7736 inst.instruction |= (Rd & 7);
7737 inst.instruction |= Rs << 3;
7738 return;
7739 }
7740 }
7741 }
7742 /* If we get here, it can't be done in 16 bits. */
7743 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7744 _("shift must be constant"));
7745 inst.instruction = THUMB_OP32 (inst.instruction);
7746 inst.instruction |= Rd << 8;
7747 inst.instruction |= Rs << 16;
7748 encode_thumb32_shifted_operand (2);
7749 }
7750 }
7751 else
7752 {
7753 constraint (inst.instruction == T_MNEM_adds
7754 || inst.instruction == T_MNEM_subs,
7755 BAD_THUMB32);
7756
7757 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7758 {
7759 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7760 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7761 BAD_HIREG);
7762
7763 inst.instruction = (inst.instruction == T_MNEM_add
7764 ? 0x0000 : 0x8000);
7765 inst.instruction |= (Rd << 4) | Rs;
7766 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7767 return;
7768 }
7769
7770 Rn = inst.operands[2].reg;
7771 constraint (inst.operands[2].shifted, _("unshifted register required"));
7772
7773 /* We now have Rd, Rs, and Rn set to registers. */
7774 if (Rd > 7 || Rs > 7 || Rn > 7)
7775 {
7776 /* Can't do this for SUB. */
7777 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7778 inst.instruction = T_OPCODE_ADD_HI;
7779 inst.instruction |= (Rd & 8) << 4;
7780 inst.instruction |= (Rd & 7);
7781 if (Rs == Rd)
7782 inst.instruction |= Rn << 3;
7783 else if (Rn == Rd)
7784 inst.instruction |= Rs << 3;
7785 else
7786 constraint (1, _("dest must overlap one source register"));
7787 }
7788 else
7789 {
7790 inst.instruction = (inst.instruction == T_MNEM_add
7791 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7792 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7793 }
7794 }
7795 }
7796
7797 static void
7798 do_t_adr (void)
7799 {
7800 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7801 {
7802 /* Defer to section relaxation. */
7803 inst.relax = inst.instruction;
7804 inst.instruction = THUMB_OP16 (inst.instruction);
7805 inst.instruction |= inst.operands[0].reg << 4;
7806 }
7807 else if (unified_syntax && inst.size_req != 2)
7808 {
7809 /* Generate a 32-bit opcode. */
7810 inst.instruction = THUMB_OP32 (inst.instruction);
7811 inst.instruction |= inst.operands[0].reg << 8;
7812 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7813 inst.reloc.pc_rel = 1;
7814 }
7815 else
7816 {
7817 /* Generate a 16-bit opcode. */
7818 inst.instruction = THUMB_OP16 (inst.instruction);
7819 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7820 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7821 inst.reloc.pc_rel = 1;
7822
7823 inst.instruction |= inst.operands[0].reg << 4;
7824 }
7825 }
7826
7827 /* Arithmetic instructions for which there is just one 16-bit
7828 instruction encoding, and it allows only two low registers.
7829 For maximal compatibility with ARM syntax, we allow three register
7830 operands even when Thumb-32 instructions are not available, as long
7831 as the first two are identical. For instance, both "sbc r0,r1" and
7832 "sbc r0,r0,r1" are allowed. */
7833 static void
7834 do_t_arit3 (void)
7835 {
7836 int Rd, Rs, Rn;
7837
7838 Rd = inst.operands[0].reg;
7839 Rs = (inst.operands[1].present
7840 ? inst.operands[1].reg /* Rd, Rs, foo */
7841 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7842 Rn = inst.operands[2].reg;
7843
7844 if (unified_syntax)
7845 {
7846 if (!inst.operands[2].isreg)
7847 {
7848 /* For an immediate, we always generate a 32-bit opcode;
7849 section relaxation will shrink it later if possible. */
7850 inst.instruction = THUMB_OP32 (inst.instruction);
7851 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7852 inst.instruction |= Rd << 8;
7853 inst.instruction |= Rs << 16;
7854 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7855 }
7856 else
7857 {
7858 bfd_boolean narrow;
7859
7860 /* See if we can do this with a 16-bit instruction. */
7861 if (THUMB_SETS_FLAGS (inst.instruction))
7862 narrow = current_it_mask == 0;
7863 else
7864 narrow = current_it_mask != 0;
7865
7866 if (Rd > 7 || Rn > 7 || Rs > 7)
7867 narrow = FALSE;
7868 if (inst.operands[2].shifted)
7869 narrow = FALSE;
7870 if (inst.size_req == 4)
7871 narrow = FALSE;
7872
7873 if (narrow
7874 && Rd == Rs)
7875 {
7876 inst.instruction = THUMB_OP16 (inst.instruction);
7877 inst.instruction |= Rd;
7878 inst.instruction |= Rn << 3;
7879 return;
7880 }
7881
7882 /* If we get here, it can't be done in 16 bits. */
7883 constraint (inst.operands[2].shifted
7884 && inst.operands[2].immisreg,
7885 _("shift must be constant"));
7886 inst.instruction = THUMB_OP32 (inst.instruction);
7887 inst.instruction |= Rd << 8;
7888 inst.instruction |= Rs << 16;
7889 encode_thumb32_shifted_operand (2);
7890 }
7891 }
7892 else
7893 {
7894 /* On its face this is a lie - the instruction does set the
7895 flags. However, the only supported mnemonic in this mode
7896 says it doesn't. */
7897 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7898
7899 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7900 _("unshifted register required"));
7901 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7902 constraint (Rd != Rs,
7903 _("dest and source1 must be the same register"));
7904
7905 inst.instruction = THUMB_OP16 (inst.instruction);
7906 inst.instruction |= Rd;
7907 inst.instruction |= Rn << 3;
7908 }
7909 }
7910
7911 /* Similarly, but for instructions where the arithmetic operation is
7912 commutative, so we can allow either of them to be different from
7913 the destination operand in a 16-bit instruction. For instance, all
7914 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7915 accepted. */
7916 static void
7917 do_t_arit3c (void)
7918 {
7919 int Rd, Rs, Rn;
7920
7921 Rd = inst.operands[0].reg;
7922 Rs = (inst.operands[1].present
7923 ? inst.operands[1].reg /* Rd, Rs, foo */
7924 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7925 Rn = inst.operands[2].reg;
7926
7927 if (unified_syntax)
7928 {
7929 if (!inst.operands[2].isreg)
7930 {
7931 /* For an immediate, we always generate a 32-bit opcode;
7932 section relaxation will shrink it later if possible. */
7933 inst.instruction = THUMB_OP32 (inst.instruction);
7934 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7935 inst.instruction |= Rd << 8;
7936 inst.instruction |= Rs << 16;
7937 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7938 }
7939 else
7940 {
7941 bfd_boolean narrow;
7942
7943 /* See if we can do this with a 16-bit instruction. */
7944 if (THUMB_SETS_FLAGS (inst.instruction))
7945 narrow = current_it_mask == 0;
7946 else
7947 narrow = current_it_mask != 0;
7948
7949 if (Rd > 7 || Rn > 7 || Rs > 7)
7950 narrow = FALSE;
7951 if (inst.operands[2].shifted)
7952 narrow = FALSE;
7953 if (inst.size_req == 4)
7954 narrow = FALSE;
7955
7956 if (narrow)
7957 {
7958 if (Rd == Rs)
7959 {
7960 inst.instruction = THUMB_OP16 (inst.instruction);
7961 inst.instruction |= Rd;
7962 inst.instruction |= Rn << 3;
7963 return;
7964 }
7965 if (Rd == Rn)
7966 {
7967 inst.instruction = THUMB_OP16 (inst.instruction);
7968 inst.instruction |= Rd;
7969 inst.instruction |= Rs << 3;
7970 return;
7971 }
7972 }
7973
7974 /* If we get here, it can't be done in 16 bits. */
7975 constraint (inst.operands[2].shifted
7976 && inst.operands[2].immisreg,
7977 _("shift must be constant"));
7978 inst.instruction = THUMB_OP32 (inst.instruction);
7979 inst.instruction |= Rd << 8;
7980 inst.instruction |= Rs << 16;
7981 encode_thumb32_shifted_operand (2);
7982 }
7983 }
7984 else
7985 {
7986 /* On its face this is a lie - the instruction does set the
7987 flags. However, the only supported mnemonic in this mode
7988 says it doesn't. */
7989 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7990
7991 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7992 _("unshifted register required"));
7993 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7994
7995 inst.instruction = THUMB_OP16 (inst.instruction);
7996 inst.instruction |= Rd;
7997
7998 if (Rd == Rs)
7999 inst.instruction |= Rn << 3;
8000 else if (Rd == Rn)
8001 inst.instruction |= Rs << 3;
8002 else
8003 constraint (1, _("dest must overlap one source register"));
8004 }
8005 }
8006
8007 static void
8008 do_t_barrier (void)
8009 {
8010 if (inst.operands[0].present)
8011 {
8012 constraint ((inst.instruction & 0xf0) != 0x40
8013 && inst.operands[0].imm != 0xf,
8014 "bad barrier type");
8015 inst.instruction |= inst.operands[0].imm;
8016 }
8017 else
8018 inst.instruction |= 0xf;
8019 }
8020
8021 static void
8022 do_t_bfc (void)
8023 {
8024 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8025 constraint (msb > 32, _("bit-field extends past end of register"));
8026 /* The instruction encoding stores the LSB and MSB,
8027 not the LSB and width. */
8028 inst.instruction |= inst.operands[0].reg << 8;
8029 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8030 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8031 inst.instruction |= msb - 1;
8032 }
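/* Worked example for the LSB/MSB conversion above (operand values are
   illustrative): "bfc r0, #8, #4" has lsb 8 and width 4, so msb is 12;
   lsb is split across bits 12-14 and 6-7 of the encoding and msb - 1 =
   11 goes into the low five bits.  */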
8033
8034 static void
8035 do_t_bfi (void)
8036 {
8037 unsigned int msb;
8038
8039 /* #0 in second position is alternative syntax for bfc, which is
8040 the same instruction but with REG_PC in the Rm field. */
8041 if (!inst.operands[1].isreg)
8042 inst.operands[1].reg = REG_PC;
8043
8044 msb = inst.operands[2].imm + inst.operands[3].imm;
8045 constraint (msb > 32, _("bit-field extends past end of register"));
8046 /* The instruction encoding stores the LSB and MSB,
8047 not the LSB and width. */
8048 inst.instruction |= inst.operands[0].reg << 8;
8049 inst.instruction |= inst.operands[1].reg << 16;
8050 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8051 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8052 inst.instruction |= msb - 1;
8053 }
8054
8055 static void
8056 do_t_bfx (void)
8057 {
8058 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8059 _("bit-field extends past end of register"));
8060 inst.instruction |= inst.operands[0].reg << 8;
8061 inst.instruction |= inst.operands[1].reg << 16;
8062 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8063 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8064 inst.instruction |= inst.operands[3].imm - 1;
8065 }
8066
8067 /* ARM V5 Thumb BLX (argument parse)
8068 BLX <target_addr> which is BLX(1)
8069 BLX <Rm> which is BLX(2)
8070 Unfortunately, there are two different opcodes for this mnemonic.
8071 So, the insns[].value is not used, and the code here zaps values
8072 into inst.instruction.
8073
8074 ??? How to take advantage of the additional two bits of displacement
8075 available in Thumb32 mode? Need new relocation? */
8076
8077 static void
8078 do_t_blx (void)
8079 {
8080 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8081 if (inst.operands[0].isreg)
8082 /* We have a register, so this is BLX(2). */
8083 inst.instruction |= inst.operands[0].reg << 3;
8084 else
8085 {
8086 /* No register. This must be BLX(1). */
8087 inst.instruction = 0xf000e800;
8088 #ifdef OBJ_ELF
8089 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8090 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8091 else
8092 #endif
8093 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8094 inst.reloc.pc_rel = 1;
8095 }
8096 }
8097
8098 static void
8099 do_t_branch (void)
8100 {
8101 int opcode;
8102 int cond;
8103
8104 if (current_it_mask)
8105 {
8106 /* Conditional branches inside IT blocks are encoded as unconditional
8107 branches. */
8108 cond = COND_ALWAYS;
8109 /* A branch must be the last instruction in an IT block. */
8110 constraint (current_it_mask != 0x10, BAD_BRANCH);
8111 }
8112 else
8113 cond = inst.cond;
8114
8115 if (cond != COND_ALWAYS)
8116 opcode = T_MNEM_bcond;
8117 else
8118 opcode = inst.instruction;
8119
8120 if (unified_syntax && inst.size_req == 4)
8121 {
8122 inst.instruction = THUMB_OP32(opcode);
8123 if (cond == COND_ALWAYS)
8124 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8125 else
8126 {
8127 assert (cond != 0xF);
8128 inst.instruction |= cond << 22;
8129 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8130 }
8131 }
8132 else
8133 {
8134 inst.instruction = THUMB_OP16(opcode);
8135 if (cond == COND_ALWAYS)
8136 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8137 else
8138 {
8139 inst.instruction |= cond << 8;
8140 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8141 }
8142 /* Allow section relaxation. */
8143 if (unified_syntax && inst.size_req != 2)
8144 inst.relax = opcode;
8145 }
8146
8147 inst.reloc.pc_rel = 1;
8148 }
8149
8150 static void
8151 do_t_bkpt (void)
8152 {
8153 constraint (inst.cond != COND_ALWAYS,
8154 _("instruction is always unconditional"));
8155 if (inst.operands[0].present)
8156 {
8157 constraint (inst.operands[0].imm > 255,
8158 _("immediate value out of range"));
8159 inst.instruction |= inst.operands[0].imm;
8160 }
8161 }
8162
8163 static void
8164 do_t_branch23 (void)
8165 {
8166 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8167 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8168 inst.reloc.pc_rel = 1;
8169
8170 /* If the destination of the branch is a defined symbol which does not have
8171 the THUMB_FUNC attribute, then we must be calling a function which has
8172 the (interfacearm) attribute. We look for the Thumb entry point to that
8173 function and change the branch to refer to that function instead. */
8174 if ( inst.reloc.exp.X_op == O_symbol
8175 && inst.reloc.exp.X_add_symbol != NULL
8176 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8177 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8178 inst.reloc.exp.X_add_symbol =
8179 find_real_start (inst.reloc.exp.X_add_symbol);
8180 }
8181
8182 static void
8183 do_t_bx (void)
8184 {
8185 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8186 inst.instruction |= inst.operands[0].reg << 3;
8187 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8188 should cause the alignment to be checked once it is known. This is
8189 because BX PC only works if the instruction is word aligned. */
8190 }
8191
8192 static void
8193 do_t_bxj (void)
8194 {
8195 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8196 if (inst.operands[0].reg == REG_PC)
8197 as_tsktsk (_("use of r15 in bxj is not really useful"));
8198
8199 inst.instruction |= inst.operands[0].reg << 16;
8200 }
8201
8202 static void
8203 do_t_clz (void)
8204 {
8205 inst.instruction |= inst.operands[0].reg << 8;
8206 inst.instruction |= inst.operands[1].reg << 16;
8207 inst.instruction |= inst.operands[1].reg;
8208 }
8209
8210 static void
8211 do_t_cps (void)
8212 {
8213 constraint (current_it_mask, BAD_NOT_IT);
8214 inst.instruction |= inst.operands[0].imm;
8215 }
8216
8217 static void
8218 do_t_cpsi (void)
8219 {
8220 constraint (current_it_mask, BAD_NOT_IT);
8221 if (unified_syntax
8222 && (inst.operands[1].present || inst.size_req == 4)
8223 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8224 {
8225 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8226 inst.instruction = 0xf3af8000;
8227 inst.instruction |= imod << 9;
8228 inst.instruction |= inst.operands[0].imm << 5;
8229 if (inst.operands[1].present)
8230 inst.instruction |= 0x100 | inst.operands[1].imm;
8231 }
8232 else
8233 {
8234 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8235 && (inst.operands[0].imm & 4),
8236 _("selected processor does not support 'A' form "
8237 "of this instruction"));
8238 constraint (inst.operands[1].present || inst.size_req == 4,
8239 _("Thumb does not support the 2-argument "
8240 "form of this instruction"));
8241 inst.instruction |= inst.operands[0].imm;
8242 }
8243 }
8244
8245 /* THUMB CPY instruction (argument parse). */
8246
8247 static void
8248 do_t_cpy (void)
8249 {
8250 if (inst.size_req == 4)
8251 {
8252 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8253 inst.instruction |= inst.operands[0].reg << 8;
8254 inst.instruction |= inst.operands[1].reg;
8255 }
8256 else
8257 {
8258 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8259 inst.instruction |= (inst.operands[0].reg & 0x7);
8260 inst.instruction |= inst.operands[1].reg << 3;
8261 }
8262 }
8263
8264 static void
8265 do_t_czb (void)
8266 {
8267 constraint (current_it_mask, BAD_NOT_IT);
8268 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8269 inst.instruction |= inst.operands[0].reg;
8270 inst.reloc.pc_rel = 1;
8271 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8272 }
8273
8274 static void
8275 do_t_dbg (void)
8276 {
8277 inst.instruction |= inst.operands[0].imm;
8278 }
8279
8280 static void
8281 do_t_div (void)
8282 {
8283 if (!inst.operands[1].present)
8284 inst.operands[1].reg = inst.operands[0].reg;
8285 inst.instruction |= inst.operands[0].reg << 8;
8286 inst.instruction |= inst.operands[1].reg << 16;
8287 inst.instruction |= inst.operands[2].reg;
8288 }
8289
8290 static void
8291 do_t_hint (void)
8292 {
8293 if (unified_syntax && inst.size_req == 4)
8294 inst.instruction = THUMB_OP32 (inst.instruction);
8295 else
8296 inst.instruction = THUMB_OP16 (inst.instruction);
8297 }
8298
8299 static void
8300 do_t_it (void)
8301 {
8302 unsigned int cond = inst.operands[0].imm;
8303
8304 constraint (current_it_mask, BAD_NOT_IT);
8305 current_it_mask = (inst.instruction & 0xf) | 0x10;
8306 current_cc = cond;
8307
8308 /* If the condition is a negative condition, invert the mask. */
8309 if ((cond & 0x1) == 0x0)
8310 {
8311 unsigned int mask = inst.instruction & 0x000f;
8312
8313 if ((mask & 0x7) == 0)
8314 /* no conversion needed */;
8315 else if ((mask & 0x3) == 0)
8316 mask ^= 0x8;
8317 else if ((mask & 0x1) == 0)
8318 mask ^= 0xC;
8319 else
8320 mask ^= 0xE;
8321
8322 inst.instruction &= 0xfff0;
8323 inst.instruction |= mask;
8324 }
8325
8326 inst.instruction |= cond << 4;
8327 }
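
/* Worked trace of the mask inversion above, for illustration only: if the
   parsed mask field is 0x4 (binary 0100) and the condition is even, then
   (mask & 7) != 0 and (mask & 3) == 0, so mask ^= 0x8 gives 0xC (1100):
   every bit above the terminating 1 is flipped while the terminator itself
   is preserved.  */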
8328
8329 static void
8330 do_t_ldmstm (void)
8331 {
8332 /* This really doesn't seem worth it. */
8333 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8334 _("expression too complex"));
8335 constraint (inst.operands[1].writeback,
8336 _("Thumb load/store multiple does not support {reglist}^"));
8337
8338 if (unified_syntax)
8339 {
8340 /* See if we can use a 16-bit instruction. */
8341 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8342 && inst.size_req != 4
8343 && inst.operands[0].reg <= 7
8344 && !(inst.operands[1].imm & ~0xff)
8345 && (inst.instruction == T_MNEM_stmia
8346 ? inst.operands[0].writeback
8347 : (inst.operands[0].writeback
8348 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8349 {
8350 if (inst.instruction == T_MNEM_stmia
8351 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8352 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8353 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8354 inst.operands[0].reg);
8355
8356 inst.instruction = THUMB_OP16 (inst.instruction);
8357 inst.instruction |= inst.operands[0].reg << 8;
8358 inst.instruction |= inst.operands[1].imm;
8359 }
8360 else
8361 {
8362 if (inst.operands[1].imm & (1 << 13))
8363 as_warn (_("SP should not be in register list"));
8364 if (inst.instruction == T_MNEM_stmia)
8365 {
8366 if (inst.operands[1].imm & (1 << 15))
8367 as_warn (_("PC should not be in register list"));
8368 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8369 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8370 inst.operands[0].reg);
8371 }
8372 else
8373 {
8374 if (inst.operands[1].imm & (1 << 14)
8375 && inst.operands[1].imm & (1 << 15))
8376 as_warn (_("LR and PC should not both be in register list"));
8377 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8378 && inst.operands[0].writeback)
8379 as_warn (_("base register should not be in register list "
8380 "when written back"));
8381 }
8382 if (inst.instruction < 0xffff)
8383 inst.instruction = THUMB_OP32 (inst.instruction);
8384 inst.instruction |= inst.operands[0].reg << 16;
8385 inst.instruction |= inst.operands[1].imm;
8386 if (inst.operands[0].writeback)
8387 inst.instruction |= WRITE_BACK;
8388 }
8389 }
8390 else
8391 {
8392 constraint (inst.operands[0].reg > 7
8393 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8394 if (inst.instruction == T_MNEM_stmia)
8395 {
8396 if (!inst.operands[0].writeback)
8397 as_warn (_("this instruction will write back the base register"));
8398 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8399 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8400 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8401 inst.operands[0].reg);
8402 }
8403 else
8404 {
8405 if (!inst.operands[0].writeback
8406 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8407 as_warn (_("this instruction will write back the base register"));
8408 else if (inst.operands[0].writeback
8409 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8410 as_warn (_("this instruction will not write back the base register"));
8411 }
8412
8413 inst.instruction = THUMB_OP16 (inst.instruction);
8414 inst.instruction |= inst.operands[0].reg << 8;
8415 inst.instruction |= inst.operands[1].imm;
8416 }
8417 }
8418
8419 static void
8420 do_t_ldrex (void)
8421 {
8422 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8423 || inst.operands[1].postind || inst.operands[1].writeback
8424 || inst.operands[1].immisreg || inst.operands[1].shifted
8425 || inst.operands[1].negative,
8426 BAD_ADDR_MODE);
8427
8428 inst.instruction |= inst.operands[0].reg << 12;
8429 inst.instruction |= inst.operands[1].reg << 16;
8430 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8431 }
8432
8433 static void
8434 do_t_ldrexd (void)
8435 {
8436 if (!inst.operands[1].present)
8437 {
8438 constraint (inst.operands[0].reg == REG_LR,
8439 _("r14 not allowed as first register "
8440 "when second register is omitted"));
8441 inst.operands[1].reg = inst.operands[0].reg + 1;
8442 }
8443 constraint (inst.operands[0].reg == inst.operands[1].reg,
8444 BAD_OVERLAP);
8445
8446 inst.instruction |= inst.operands[0].reg << 12;
8447 inst.instruction |= inst.operands[1].reg << 8;
8448 inst.instruction |= inst.operands[2].reg << 16;
8449 }
8450
8451 static void
8452 do_t_ldst (void)
8453 {
8454 unsigned long opcode;
8455 int Rn;
8456
8457 opcode = inst.instruction;
8458 if (unified_syntax)
8459 {
8460 if (!inst.operands[1].isreg)
8461 {
8462 if (opcode <= 0xffff)
8463 inst.instruction = THUMB_OP32 (opcode);
8464 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8465 return;
8466 }
8467 if (inst.operands[1].isreg
8468 && !inst.operands[1].writeback
8469 && !inst.operands[1].shifted && !inst.operands[1].postind
8470 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8471 && opcode <= 0xffff
8472 && inst.size_req != 4)
8473 {
8474 /* Insn may have a 16-bit form. */
8475 Rn = inst.operands[1].reg;
8476 if (inst.operands[1].immisreg)
8477 {
8478 inst.instruction = THUMB_OP16 (opcode);
8479 /* [Rn, Ri] */
8480 if (Rn <= 7 && inst.operands[1].imm <= 7)
8481 goto op16;
8482 }
8483 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8484 && opcode != T_MNEM_ldrsb)
8485 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8486 || (Rn == REG_SP && opcode == T_MNEM_str))
8487 {
8488 /* [Rn, #const] */
8489 if (Rn > 7)
8490 {
8491 if (Rn == REG_PC)
8492 {
8493 if (inst.reloc.pc_rel)
8494 opcode = T_MNEM_ldr_pc2;
8495 else
8496 opcode = T_MNEM_ldr_pc;
8497 }
8498 else
8499 {
8500 if (opcode == T_MNEM_ldr)
8501 opcode = T_MNEM_ldr_sp;
8502 else
8503 opcode = T_MNEM_str_sp;
8504 }
8505 inst.instruction = inst.operands[0].reg << 8;
8506 }
8507 else
8508 {
8509 inst.instruction = inst.operands[0].reg;
8510 inst.instruction |= inst.operands[1].reg << 3;
8511 }
8512 inst.instruction |= THUMB_OP16 (opcode);
8513 if (inst.size_req == 2)
8514 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8515 else
8516 inst.relax = opcode;
8517 return;
8518 }
8519 }
8520 /* Definitely a 32-bit variant. */
8521 inst.instruction = THUMB_OP32 (opcode);
8522 inst.instruction |= inst.operands[0].reg << 12;
8523 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8524 return;
8525 }
8526
8527 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8528
8529 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8530 {
8531 /* Only [Rn,Rm] is acceptable. */
8532 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8533 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8534 || inst.operands[1].postind || inst.operands[1].shifted
8535 || inst.operands[1].negative,
8536 _("Thumb does not support this addressing mode"));
8537 inst.instruction = THUMB_OP16 (inst.instruction);
8538 goto op16;
8539 }
8540
8541 inst.instruction = THUMB_OP16 (inst.instruction);
8542 if (!inst.operands[1].isreg)
8543 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8544 return;
8545
8546 constraint (!inst.operands[1].preind
8547 || inst.operands[1].shifted
8548 || inst.operands[1].writeback,
8549 _("Thumb does not support this addressing mode"));
8550 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8551 {
8552 constraint (inst.instruction & 0x0600,
8553 _("byte or halfword not valid for base register"));
8554 constraint (inst.operands[1].reg == REG_PC
8555 && !(inst.instruction & THUMB_LOAD_BIT),
8556 _("r15 based store not allowed"));
8557 constraint (inst.operands[1].immisreg,
8558 _("invalid base register for register offset"));
8559
8560 if (inst.operands[1].reg == REG_PC)
8561 inst.instruction = T_OPCODE_LDR_PC;
8562 else if (inst.instruction & THUMB_LOAD_BIT)
8563 inst.instruction = T_OPCODE_LDR_SP;
8564 else
8565 inst.instruction = T_OPCODE_STR_SP;
8566
8567 inst.instruction |= inst.operands[0].reg << 8;
8568 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8569 return;
8570 }
8571
8572 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8573 if (!inst.operands[1].immisreg)
8574 {
8575 /* Immediate offset. */
8576 inst.instruction |= inst.operands[0].reg;
8577 inst.instruction |= inst.operands[1].reg << 3;
8578 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8579 return;
8580 }
8581
8582 /* Register offset. */
8583 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8584 constraint (inst.operands[1].negative,
8585 _("Thumb does not support this addressing mode"));
8586
8587 op16:
8588 switch (inst.instruction)
8589 {
8590 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8591 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8592 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8593 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8594 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8595 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8596 case 0x5600 /* ldrsb */:
8597 case 0x5e00 /* ldrsh */: break;
8598 default: abort ();
8599 }
8600
8601 inst.instruction |= inst.operands[0].reg;
8602 inst.instruction |= inst.operands[1].reg << 3;
8603 inst.instruction |= inst.operands[1].imm << 6;
8604 }
8605
8606 static void
8607 do_t_ldstd (void)
8608 {
8609 if (!inst.operands[1].present)
8610 {
8611 inst.operands[1].reg = inst.operands[0].reg + 1;
8612 constraint (inst.operands[0].reg == REG_LR,
8613 _("r14 not allowed here"));
8614 }
8615 inst.instruction |= inst.operands[0].reg << 12;
8616 inst.instruction |= inst.operands[1].reg << 8;
8617 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8618
8619 }
8620
8621 static void
8622 do_t_ldstt (void)
8623 {
8624 inst.instruction |= inst.operands[0].reg << 12;
8625 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8626 }
8627
8628 static void
8629 do_t_mla (void)
8630 {
8631 inst.instruction |= inst.operands[0].reg << 8;
8632 inst.instruction |= inst.operands[1].reg << 16;
8633 inst.instruction |= inst.operands[2].reg;
8634 inst.instruction |= inst.operands[3].reg << 12;
8635 }
8636
8637 static void
8638 do_t_mlal (void)
8639 {
8640 inst.instruction |= inst.operands[0].reg << 12;
8641 inst.instruction |= inst.operands[1].reg << 8;
8642 inst.instruction |= inst.operands[2].reg << 16;
8643 inst.instruction |= inst.operands[3].reg;
8644 }
8645
8646 static void
8647 do_t_mov_cmp (void)
8648 {
8649 if (unified_syntax)
8650 {
8651 int r0off = (inst.instruction == T_MNEM_mov
8652 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8653 unsigned long opcode;
8654 bfd_boolean narrow;
8655 bfd_boolean low_regs;
8656
8657 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8658 opcode = inst.instruction;
8659 if (current_it_mask)
8660 narrow = opcode != T_MNEM_movs;
8661 else
8662 narrow = opcode != T_MNEM_movs || low_regs;
8663 if (inst.size_req == 4
8664 || inst.operands[1].shifted)
8665 narrow = FALSE;
8666
8667 if (!inst.operands[1].isreg)
8668 {
8669 /* Immediate operand. */
8670 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8671 narrow = 0;
8672 if (low_regs && narrow)
8673 {
8674 inst.instruction = THUMB_OP16 (opcode);
8675 inst.instruction |= inst.operands[0].reg << 8;
8676 if (inst.size_req == 2)
8677 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8678 else
8679 inst.relax = opcode;
8680 }
8681 else
8682 {
8683 inst.instruction = THUMB_OP32 (inst.instruction);
8684 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8685 inst.instruction |= inst.operands[0].reg << r0off;
8686 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8687 }
8688 }
8689 else if (!narrow)
8690 {
8691 inst.instruction = THUMB_OP32 (inst.instruction);
8692 inst.instruction |= inst.operands[0].reg << r0off;
8693 encode_thumb32_shifted_operand (1);
8694 }
8695 else
8696 switch (inst.instruction)
8697 {
8698 case T_MNEM_mov:
8699 inst.instruction = T_OPCODE_MOV_HR;
8700 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8701 inst.instruction |= (inst.operands[0].reg & 0x7);
8702 inst.instruction |= inst.operands[1].reg << 3;
8703 break;
8704
8705 case T_MNEM_movs:
8706 /* We know we have low registers at this point.
8707 Generate ADD Rd, Rs, #0. */
8708 inst.instruction = T_OPCODE_ADD_I3;
8709 inst.instruction |= inst.operands[0].reg;
8710 inst.instruction |= inst.operands[1].reg << 3;
8711 break;
8712
8713 case T_MNEM_cmp:
8714 if (low_regs)
8715 {
8716 inst.instruction = T_OPCODE_CMP_LR;
8717 inst.instruction |= inst.operands[0].reg;
8718 inst.instruction |= inst.operands[1].reg << 3;
8719 }
8720 else
8721 {
8722 inst.instruction = T_OPCODE_CMP_HR;
8723 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8724 inst.instruction |= (inst.operands[0].reg & 0x7);
8725 inst.instruction |= inst.operands[1].reg << 3;
8726 }
8727 break;
8728 }
8729 return;
8730 }
8731
8732 inst.instruction = THUMB_OP16 (inst.instruction);
8733 if (inst.operands[1].isreg)
8734 {
8735 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8736 {
8737 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8738 since a MOV instruction produces unpredictable results. */
8739 if (inst.instruction == T_OPCODE_MOV_I8)
8740 inst.instruction = T_OPCODE_ADD_I3;
8741 else
8742 inst.instruction = T_OPCODE_CMP_LR;
8743
8744 inst.instruction |= inst.operands[0].reg;
8745 inst.instruction |= inst.operands[1].reg << 3;
8746 }
8747 else
8748 {
8749 if (inst.instruction == T_OPCODE_MOV_I8)
8750 inst.instruction = T_OPCODE_MOV_HR;
8751 else
8752 inst.instruction = T_OPCODE_CMP_HR;
8753 do_t_cpy ();
8754 }
8755 }
8756 else
8757 {
8758 constraint (inst.operands[0].reg > 7,
8759 _("only lo regs allowed with immediate"));
8760 inst.instruction |= inst.operands[0].reg << 8;
8761 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8762 }
8763 }
8764
8765 static void
8766 do_t_mov16 (void)
8767 {
8768 bfd_vma imm;
8769 bfd_boolean top;
8770
8771 top = (inst.instruction & 0x00800000) != 0;
8772 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
8773 {
8774 	      constraint (top, _(":lower16: not allowed in this instruction"));
8775 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
8776 }
8777 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
8778 {
8779 	      constraint (!top, _(":upper16: not allowed in this instruction"));
8780 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
8781 }
8782
8783 inst.instruction |= inst.operands[0].reg << 8;
8784 if (inst.reloc.type == BFD_RELOC_UNUSED)
8785 {
8786 imm = inst.reloc.exp.X_add_number;
8787 inst.instruction |= (imm & 0xf000) << 4;
8788 inst.instruction |= (imm & 0x0800) << 15;
8789 inst.instruction |= (imm & 0x0700) << 4;
8790 inst.instruction |= (imm & 0x00ff);
8791 }
8792 }
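
/* Illustrative example of the immediate split above (not part of the
   encoder): for imm == 0xABCD,

     (0xABCD & 0xf000) << 4   ==  0x000a0000   (imm4, bits 19-16)
     (0xABCD & 0x0800) << 15  ==  0x04000000   (i bit, bit 26)
     (0xABCD & 0x0700) << 4   ==  0x00003000   (imm3, bits 14-12)
     (0xABCD & 0x00ff)        ==  0x000000cd   (imm8, bits 7-0)

   so 0x040a30cd is ORed into inst.instruction.  */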
8793
8794 static void
8795 do_t_mvn_tst (void)
8796 {
8797 if (unified_syntax)
8798 {
8799 int r0off = (inst.instruction == T_MNEM_mvn
8800 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8801 bfd_boolean narrow;
8802
8803 if (inst.size_req == 4
8804 || inst.instruction > 0xffff
8805 || inst.operands[1].shifted
8806 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8807 narrow = FALSE;
8808 else if (inst.instruction == T_MNEM_cmn)
8809 narrow = TRUE;
8810 else if (THUMB_SETS_FLAGS (inst.instruction))
8811 narrow = (current_it_mask == 0);
8812 else
8813 narrow = (current_it_mask != 0);
8814
8815 if (!inst.operands[1].isreg)
8816 {
8817 /* For an immediate, we always generate a 32-bit opcode;
8818 section relaxation will shrink it later if possible. */
8819 if (inst.instruction < 0xffff)
8820 inst.instruction = THUMB_OP32 (inst.instruction);
8821 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8822 inst.instruction |= inst.operands[0].reg << r0off;
8823 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8824 }
8825 else
8826 {
8827 /* See if we can do this with a 16-bit instruction. */
8828 if (narrow)
8829 {
8830 inst.instruction = THUMB_OP16 (inst.instruction);
8831 inst.instruction |= inst.operands[0].reg;
8832 inst.instruction |= inst.operands[1].reg << 3;
8833 }
8834 else
8835 {
8836 constraint (inst.operands[1].shifted
8837 && inst.operands[1].immisreg,
8838 _("shift must be constant"));
8839 if (inst.instruction < 0xffff)
8840 inst.instruction = THUMB_OP32 (inst.instruction);
8841 inst.instruction |= inst.operands[0].reg << r0off;
8842 encode_thumb32_shifted_operand (1);
8843 }
8844 }
8845 }
8846 else
8847 {
8848 constraint (inst.instruction > 0xffff
8849 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8850 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8851 _("unshifted register required"));
8852 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8853 BAD_HIREG);
8854
8855 inst.instruction = THUMB_OP16 (inst.instruction);
8856 inst.instruction |= inst.operands[0].reg;
8857 inst.instruction |= inst.operands[1].reg << 3;
8858 }
8859 }
8860
8861 static void
8862 do_t_mrs (void)
8863 {
8864 int flags;
8865 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8866 if (flags == 0)
8867 {
8868 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8869 _("selected processor does not support "
8870 "requested special purpose register"));
8871 }
8872 else
8873 {
8874 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8875 _("selected processor does not support "
8876 "requested special purpose register %x"));
8877 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8878 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8879 _("'CPSR' or 'SPSR' expected"));
8880 }
8881
8882 inst.instruction |= inst.operands[0].reg << 8;
8883 inst.instruction |= (flags & SPSR_BIT) >> 2;
8884 inst.instruction |= inst.operands[1].imm & 0xff;
8885 }
8886
8887 static void
8888 do_t_msr (void)
8889 {
8890 int flags;
8891
8892 constraint (!inst.operands[1].isreg,
8893 _("Thumb encoding does not support an immediate here"));
8894 flags = inst.operands[0].imm;
8895 if (flags & ~0xff)
8896 {
8897 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8898 _("selected processor does not support "
8899 "requested special purpose register"));
8900 }
8901 else
8902 {
8903 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8904 _("selected processor does not support "
8905 "requested special purpose register"));
8906 flags |= PSR_f;
8907 }
8908 inst.instruction |= (flags & SPSR_BIT) >> 2;
8909 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8910 inst.instruction |= (flags & 0xff);
8911 inst.instruction |= inst.operands[1].reg << 16;
8912 }
8913
8914 static void
8915 do_t_mul (void)
8916 {
8917 if (!inst.operands[2].present)
8918 inst.operands[2].reg = inst.operands[0].reg;
8919
8920 /* There is no 32-bit MULS and no 16-bit MUL. */
8921 if (unified_syntax && inst.instruction == T_MNEM_mul)
8922 {
8923 inst.instruction = THUMB_OP32 (inst.instruction);
8924 inst.instruction |= inst.operands[0].reg << 8;
8925 inst.instruction |= inst.operands[1].reg << 16;
8926 inst.instruction |= inst.operands[2].reg << 0;
8927 }
8928 else
8929 {
8930 constraint (!unified_syntax
8931 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8932 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8933 BAD_HIREG);
8934
8935 inst.instruction = THUMB_OP16 (inst.instruction);
8936 inst.instruction |= inst.operands[0].reg;
8937
8938 if (inst.operands[0].reg == inst.operands[1].reg)
8939 inst.instruction |= inst.operands[2].reg << 3;
8940 else if (inst.operands[0].reg == inst.operands[2].reg)
8941 inst.instruction |= inst.operands[1].reg << 3;
8942 else
8943 constraint (1, _("dest must overlap one source register"));
8944 }
8945 }
8946
8947 static void
8948 do_t_mull (void)
8949 {
8950 inst.instruction |= inst.operands[0].reg << 12;
8951 inst.instruction |= inst.operands[1].reg << 8;
8952 inst.instruction |= inst.operands[2].reg << 16;
8953 inst.instruction |= inst.operands[3].reg;
8954
8955 if (inst.operands[0].reg == inst.operands[1].reg)
8956 as_tsktsk (_("rdhi and rdlo must be different"));
8957 }
8958
8959 static void
8960 do_t_nop (void)
8961 {
8962 if (unified_syntax)
8963 {
8964 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8965 {
8966 inst.instruction = THUMB_OP32 (inst.instruction);
8967 inst.instruction |= inst.operands[0].imm;
8968 }
8969 else
8970 {
8971 inst.instruction = THUMB_OP16 (inst.instruction);
8972 inst.instruction |= inst.operands[0].imm << 4;
8973 }
8974 }
8975 else
8976 {
8977 constraint (inst.operands[0].present,
8978 _("Thumb does not support NOP with hints"));
8979 inst.instruction = 0x46c0;
8980 }
8981 }
8982
8983 static void
8984 do_t_neg (void)
8985 {
8986 if (unified_syntax)
8987 {
8988 bfd_boolean narrow;
8989
8990 if (THUMB_SETS_FLAGS (inst.instruction))
8991 narrow = (current_it_mask == 0);
8992 else
8993 narrow = (current_it_mask != 0);
8994 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8995 narrow = FALSE;
8996 if (inst.size_req == 4)
8997 narrow = FALSE;
8998
8999 if (!narrow)
9000 {
9001 inst.instruction = THUMB_OP32 (inst.instruction);
9002 inst.instruction |= inst.operands[0].reg << 8;
9003 inst.instruction |= inst.operands[1].reg << 16;
9004 }
9005 else
9006 {
9007 inst.instruction = THUMB_OP16 (inst.instruction);
9008 inst.instruction |= inst.operands[0].reg;
9009 inst.instruction |= inst.operands[1].reg << 3;
9010 }
9011 }
9012 else
9013 {
9014 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9015 BAD_HIREG);
9016 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9017
9018 inst.instruction = THUMB_OP16 (inst.instruction);
9019 inst.instruction |= inst.operands[0].reg;
9020 inst.instruction |= inst.operands[1].reg << 3;
9021 }
9022 }
9023
9024 static void
9025 do_t_pkhbt (void)
9026 {
9027 inst.instruction |= inst.operands[0].reg << 8;
9028 inst.instruction |= inst.operands[1].reg << 16;
9029 inst.instruction |= inst.operands[2].reg;
9030 if (inst.operands[3].present)
9031 {
9032 unsigned int val = inst.reloc.exp.X_add_number;
9033 constraint (inst.reloc.exp.X_op != O_constant,
9034 _("expression too complex"));
9035 inst.instruction |= (val & 0x1c) << 10;
9036 inst.instruction |= (val & 0x03) << 6;
9037 }
9038 }
9039
9040 static void
9041 do_t_pkhtb (void)
9042 {
9043 if (!inst.operands[3].present)
9044 inst.instruction &= ~0x00000020;
9045 do_t_pkhbt ();
9046 }
9047
9048 static void
9049 do_t_pld (void)
9050 {
9051 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9052 }
9053
9054 static void
9055 do_t_push_pop (void)
9056 {
9057 unsigned mask;
9058
9059 constraint (inst.operands[0].writeback,
9060 _("push/pop do not support {reglist}^"));
9061 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9062 _("expression too complex"));
9063
9064 mask = inst.operands[0].imm;
9065 if ((mask & ~0xff) == 0)
9066 inst.instruction = THUMB_OP16 (inst.instruction);
9067 else if ((inst.instruction == T_MNEM_push
9068 && (mask & ~0xff) == 1 << REG_LR)
9069 || (inst.instruction == T_MNEM_pop
9070 && (mask & ~0xff) == 1 << REG_PC))
9071 {
9072 inst.instruction = THUMB_OP16 (inst.instruction);
9073 inst.instruction |= THUMB_PP_PC_LR;
9074 mask &= 0xff;
9075 }
9076 else if (unified_syntax)
9077 {
9078 if (mask & (1 << 13))
9079 inst.error = _("SP not allowed in register list");
9080 if (inst.instruction == T_MNEM_push)
9081 {
9082 if (mask & (1 << 15))
9083 inst.error = _("PC not allowed in register list");
9084 }
9085 else
9086 {
9087 if (mask & (1 << 14)
9088 && mask & (1 << 15))
9089 inst.error = _("LR and PC should not both be in register list");
9090 }
9091 if ((mask & (mask - 1)) == 0)
9092 {
9093 /* Single register push/pop implemented as str/ldr. */
9094 if (inst.instruction == T_MNEM_push)
9095 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9096 else
9097 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9098 	    mask = ffs (mask) - 1;
9099 mask <<= 12;
9100 }
9101 else
9102 inst.instruction = THUMB_OP32 (inst.instruction);
9103 }
9104 else
9105 {
9106 inst.error = _("invalid register list to push/pop instruction");
9107 return;
9108 }
9109
9110 inst.instruction |= mask;
9111 }
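
/* Illustrative trace of the single-register path above (assuming unified
   syntax): "push {r8}" gives mask == 0x100, which is neither confined to
   the low registers nor the LR special case, but mask & (mask - 1) == 0,
   so the opcode becomes 0xf84d0d04 (str reg, [sp, #-4]!) and
   mask = (ffs (0x100) - 1) << 12 == 0x8000, yielding 0xf84d8d04.  */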
9112
9113 static void
9114 do_t_rbit (void)
9115 {
9116 inst.instruction |= inst.operands[0].reg << 8;
9117 inst.instruction |= inst.operands[1].reg << 16;
9118 }
9119
9120 static void
9121 do_t_rev (void)
9122 {
9123 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9124 && inst.size_req != 4)
9125 {
9126 inst.instruction = THUMB_OP16 (inst.instruction);
9127 inst.instruction |= inst.operands[0].reg;
9128 inst.instruction |= inst.operands[1].reg << 3;
9129 }
9130 else if (unified_syntax)
9131 {
9132 inst.instruction = THUMB_OP32 (inst.instruction);
9133 inst.instruction |= inst.operands[0].reg << 8;
9134 inst.instruction |= inst.operands[1].reg << 16;
9135 inst.instruction |= inst.operands[1].reg;
9136 }
9137 else
9138 inst.error = BAD_HIREG;
9139 }
9140
9141 static void
9142 do_t_rsb (void)
9143 {
9144 int Rd, Rs;
9145
9146 Rd = inst.operands[0].reg;
9147 Rs = (inst.operands[1].present
9148 ? inst.operands[1].reg /* Rd, Rs, foo */
9149 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9150
9151 inst.instruction |= Rd << 8;
9152 inst.instruction |= Rs << 16;
9153 if (!inst.operands[2].isreg)
9154 {
9155 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9156 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9157 }
9158 else
9159 encode_thumb32_shifted_operand (2);
9160 }
9161
9162 static void
9163 do_t_setend (void)
9164 {
9165 constraint (current_it_mask, BAD_NOT_IT);
9166 if (inst.operands[0].imm)
9167 inst.instruction |= 0x8;
9168 }
9169
9170 static void
9171 do_t_shift (void)
9172 {
9173 if (!inst.operands[1].present)
9174 inst.operands[1].reg = inst.operands[0].reg;
9175
9176 if (unified_syntax)
9177 {
9178 bfd_boolean narrow;
9179 int shift_kind;
9180
9181 switch (inst.instruction)
9182 {
9183 case T_MNEM_asr:
9184 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9185 case T_MNEM_lsl:
9186 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9187 case T_MNEM_lsr:
9188 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9189 case T_MNEM_ror:
9190 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9191 default: abort ();
9192 }
9193
9194 if (THUMB_SETS_FLAGS (inst.instruction))
9195 narrow = (current_it_mask == 0);
9196 else
9197 narrow = (current_it_mask != 0);
9198 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9199 narrow = FALSE;
9200 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9201 narrow = FALSE;
9202 if (inst.operands[2].isreg
9203 && (inst.operands[1].reg != inst.operands[0].reg
9204 || inst.operands[2].reg > 7))
9205 narrow = FALSE;
9206 if (inst.size_req == 4)
9207 narrow = FALSE;
9208
9209 if (!narrow)
9210 {
9211 if (inst.operands[2].isreg)
9212 {
9213 inst.instruction = THUMB_OP32 (inst.instruction);
9214 inst.instruction |= inst.operands[0].reg << 8;
9215 inst.instruction |= inst.operands[1].reg << 16;
9216 inst.instruction |= inst.operands[2].reg;
9217 }
9218 else
9219 {
9220 inst.operands[1].shifted = 1;
9221 inst.operands[1].shift_kind = shift_kind;
9222 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9223 ? T_MNEM_movs : T_MNEM_mov);
9224 inst.instruction |= inst.operands[0].reg << 8;
9225 encode_thumb32_shifted_operand (1);
9226 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9227 inst.reloc.type = BFD_RELOC_UNUSED;
9228 }
9229 }
9230 else
9231 {
9232 if (inst.operands[2].isreg)
9233 {
9234 switch (shift_kind)
9235 {
9236 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9237 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9238 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9239 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9240 default: abort ();
9241 }
9242
9243 inst.instruction |= inst.operands[0].reg;
9244 inst.instruction |= inst.operands[2].reg << 3;
9245 }
9246 else
9247 {
9248 switch (shift_kind)
9249 {
9250 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9251 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9252 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9253 default: abort ();
9254 }
9255 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9256 inst.instruction |= inst.operands[0].reg;
9257 inst.instruction |= inst.operands[1].reg << 3;
9258 }
9259 }
9260 }
9261 else
9262 {
9263 constraint (inst.operands[0].reg > 7
9264 || inst.operands[1].reg > 7, BAD_HIREG);
9265 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9266
9267 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9268 {
9269 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9270 constraint (inst.operands[0].reg != inst.operands[1].reg,
9271 _("source1 and dest must be same register"));
9272
9273 switch (inst.instruction)
9274 {
9275 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9276 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9277 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9278 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9279 default: abort ();
9280 }
9281
9282 inst.instruction |= inst.operands[0].reg;
9283 inst.instruction |= inst.operands[2].reg << 3;
9284 }
9285 else
9286 {
9287 switch (inst.instruction)
9288 {
9289 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9290 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9291 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9292 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9293 default: abort ();
9294 }
9295 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9296 inst.instruction |= inst.operands[0].reg;
9297 inst.instruction |= inst.operands[1].reg << 3;
9298 }
9299 }
9300 }
9301
9302 static void
9303 do_t_simd (void)
9304 {
9305 inst.instruction |= inst.operands[0].reg << 8;
9306 inst.instruction |= inst.operands[1].reg << 16;
9307 inst.instruction |= inst.operands[2].reg;
9308 }
9309
9310 static void
9311 do_t_smc (void)
9312 {
9313 unsigned int value = inst.reloc.exp.X_add_number;
9314 constraint (inst.reloc.exp.X_op != O_constant,
9315 _("expression too complex"));
9316 inst.reloc.type = BFD_RELOC_UNUSED;
9317 inst.instruction |= (value & 0xf000) >> 12;
9318 inst.instruction |= (value & 0x0ff0);
9319 inst.instruction |= (value & 0x000f) << 16;
9320 }
9321
9322 static void
9323 do_t_ssat (void)
9324 {
9325 inst.instruction |= inst.operands[0].reg << 8;
9326 inst.instruction |= inst.operands[1].imm - 1;
9327 inst.instruction |= inst.operands[2].reg << 16;
9328
9329 if (inst.operands[3].present)
9330 {
9331 constraint (inst.reloc.exp.X_op != O_constant,
9332 _("expression too complex"));
9333
9334 if (inst.reloc.exp.X_add_number != 0)
9335 {
9336 if (inst.operands[3].shift_kind == SHIFT_ASR)
9337 inst.instruction |= 0x00200000; /* sh bit */
9338 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9339 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9340 }
9341 inst.reloc.type = BFD_RELOC_UNUSED;
9342 }
9343 }
9344
9345 static void
9346 do_t_ssat16 (void)
9347 {
9348 inst.instruction |= inst.operands[0].reg << 8;
9349 inst.instruction |= inst.operands[1].imm - 1;
9350 inst.instruction |= inst.operands[2].reg << 16;
9351 }
9352
9353 static void
9354 do_t_strex (void)
9355 {
9356 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9357 || inst.operands[2].postind || inst.operands[2].writeback
9358 || inst.operands[2].immisreg || inst.operands[2].shifted
9359 || inst.operands[2].negative,
9360 BAD_ADDR_MODE);
9361
9362 inst.instruction |= inst.operands[0].reg << 8;
9363 inst.instruction |= inst.operands[1].reg << 12;
9364 inst.instruction |= inst.operands[2].reg << 16;
9365 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9366 }
9367
9368 static void
9369 do_t_strexd (void)
9370 {
9371 if (!inst.operands[2].present)
9372 inst.operands[2].reg = inst.operands[1].reg + 1;
9373
9374 constraint (inst.operands[0].reg == inst.operands[1].reg
9375 || inst.operands[0].reg == inst.operands[2].reg
9376 || inst.operands[0].reg == inst.operands[3].reg
9377 || inst.operands[1].reg == inst.operands[2].reg,
9378 BAD_OVERLAP);
9379
9380 inst.instruction |= inst.operands[0].reg;
9381 inst.instruction |= inst.operands[1].reg << 12;
9382 inst.instruction |= inst.operands[2].reg << 8;
9383 inst.instruction |= inst.operands[3].reg << 16;
9384 }
9385
9386 static void
9387 do_t_sxtah (void)
9388 {
9389 inst.instruction |= inst.operands[0].reg << 8;
9390 inst.instruction |= inst.operands[1].reg << 16;
9391 inst.instruction |= inst.operands[2].reg;
9392 inst.instruction |= inst.operands[3].imm << 4;
9393 }
9394
9395 static void
9396 do_t_sxth (void)
9397 {
9398 if (inst.instruction <= 0xffff && inst.size_req != 4
9399 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9400 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9401 {
9402 inst.instruction = THUMB_OP16 (inst.instruction);
9403 inst.instruction |= inst.operands[0].reg;
9404 inst.instruction |= inst.operands[1].reg << 3;
9405 }
9406 else if (unified_syntax)
9407 {
9408 if (inst.instruction <= 0xffff)
9409 inst.instruction = THUMB_OP32 (inst.instruction);
9410 inst.instruction |= inst.operands[0].reg << 8;
9411 inst.instruction |= inst.operands[1].reg;
9412 inst.instruction |= inst.operands[2].imm << 4;
9413 }
9414 else
9415 {
9416 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9417 _("Thumb encoding does not support rotation"));
9418 constraint (1, BAD_HIREG);
9419 }
9420 }
9421
9422 static void
9423 do_t_swi (void)
9424 {
9425 inst.reloc.type = BFD_RELOC_ARM_SWI;
9426 }
9427
9428 static void
9429 do_t_tb (void)
9430 {
9431 int half;
9432
9433 half = (inst.instruction & 0x10) != 0;
9434 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9435 constraint (inst.operands[0].immisreg,
9436 _("instruction requires register index"));
9437 constraint (inst.operands[0].imm == 15,
9438 _("PC is not a valid index register"));
9439 constraint (!half && inst.operands[0].shifted,
9440 _("instruction does not allow shifted index"));
9441 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9442 }
9443
9444 static void
9445 do_t_usat (void)
9446 {
9447 inst.instruction |= inst.operands[0].reg << 8;
9448 inst.instruction |= inst.operands[1].imm;
9449 inst.instruction |= inst.operands[2].reg << 16;
9450
9451 if (inst.operands[3].present)
9452 {
9453 constraint (inst.reloc.exp.X_op != O_constant,
9454 _("expression too complex"));
9455 if (inst.reloc.exp.X_add_number != 0)
9456 {
9457 if (inst.operands[3].shift_kind == SHIFT_ASR)
9458 inst.instruction |= 0x00200000; /* sh bit */
9459
9460 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9461 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9462 }
9463 inst.reloc.type = BFD_RELOC_UNUSED;
9464 }
9465 }
9466
9467 static void
9468 do_t_usat16 (void)
9469 {
9470 inst.instruction |= inst.operands[0].reg << 8;
9471 inst.instruction |= inst.operands[1].imm;
9472 inst.instruction |= inst.operands[2].reg << 16;
9473 }
9474
9475 /* Neon instruction encoder helpers. */
9476
9477 /* Encodings for the different types for various Neon opcodes. */
9478
9479 /* An "invalid" code for the following tables. */
9480 #define N_INV -1u
9481
9482 struct neon_tab_entry
9483 {
9484 unsigned integer;
9485 unsigned float_or_poly;
9486 unsigned scalar_or_imm;
9487 };
9488
9489 /* Map overloaded Neon opcodes to their respective encodings. */
9490 #define NEON_ENC_TAB \
9491 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9492 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9493 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9494 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9495 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9496 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9497 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9498 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9499 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9500 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9501 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9502 /* Register variants of the following two instructions are encoded as
9503 vcge / vcgt with the operands reversed. */ \
9504 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9505 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9506 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9507 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9508 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9509 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9510 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9511 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9512 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9513 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9514 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9515 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9516 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9517 X(vshl, 0x0000400, N_INV, 0x0800510), \
9518 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9519 X(vand, 0x0000110, N_INV, 0x0800030), \
9520 X(vbic, 0x0100110, N_INV, 0x0800030), \
9521 X(veor, 0x1000110, N_INV, N_INV), \
9522 X(vorn, 0x0300110, N_INV, 0x0800010), \
9523 X(vorr, 0x0200110, N_INV, 0x0800010), \
9524 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9525 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9526 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9527 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9528 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9529 X(vst1, 0x0000000, 0x0800000, N_INV), \
9530 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9531 X(vst2, 0x0000100, 0x0800100, N_INV), \
9532 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9533 X(vst3, 0x0000200, 0x0800200, N_INV), \
9534 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9535 X(vst4, 0x0000300, 0x0800300, N_INV), \
9536 X(vmovn, 0x1b20200, N_INV, N_INV), \
9537 X(vtrn, 0x1b20080, N_INV, N_INV), \
9538 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9539 X(vqmovun, 0x1b20240, N_INV, N_INV)
9540
9541 enum neon_opc
9542 {
9543 #define X(OPC,I,F,S) N_MNEM_##OPC
9544 NEON_ENC_TAB
9545 #undef X
9546 };
9547
9548 static const struct neon_tab_entry neon_enc_tab[] =
9549 {
9550 #define X(OPC,I,F,S) { (I), (F), (S) }
9551 NEON_ENC_TAB
9552 #undef X
9553 };
9554
9555 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9556 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9557 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9558 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9559 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9560 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9561 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9562 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9563 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
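
/* Illustration of the X-macro pairing above (not used by the code): the
   same NEON_ENC_TAB list expands once into the N_MNEM_* enumerators and
   once into neon_enc_tab[], so N_MNEM_vadd indexes the row
   {0x0000800, 0x0000d00, N_INV} and NEON_ENC_INTEGER (N_MNEM_vadd) yields
   0x0000800.  The & 0x0fffffff in the accessors is presumably there to
   strip any flag bits stored above the table index.  */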
9564
9565 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9566 shapes which an instruction can accept. The following mnemonic characters
9567 are used in the tag names for this enumeration:
9568
9569 D - Neon D<n> register
9570 Q - Neon Q<n> register
9571 I - Immediate
9572 S - Scalar
9573 R - ARM register
9574 L - D<n> register list
9575 */
9576
9577 enum neon_shape
9578 {
9579 NS_DDD_QQQ,
9580 NS_DDD,
9581 NS_QQQ,
9582 NS_DDI_QQI,
9583 NS_DDI,
9584 NS_QQI,
9585 NS_DDS_QQS,
9586 NS_DDS,
9587 NS_QQS,
9588 NS_DD_QQ,
9589 NS_DD,
9590 NS_QQ,
9591 NS_DS_QS,
9592 NS_DS,
9593 NS_QS,
9594 NS_DR_QR,
9595 NS_DR,
9596 NS_QR,
9597 NS_DI_QI,
9598 NS_DI,
9599 NS_QI,
9600 NS_DLD,
9601 NS_DQ,
9602 NS_QD,
9603 NS_DQI,
9604 NS_QDI,
9605 NS_QDD,
9606 NS_QDS,
9607 NS_QQD,
9608 NS_DQQ,
9609 NS_DDDI_QQQI,
9610 NS_DDDI,
9611 NS_QQQI,
9612 NS_IGNORE
9613 };
9614
9615 /* Bit masks used in type checking given instructions.
9616 'N_EQK' means the type must be the same as (or based on in some way) the key
9617 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9618 set, various other bits can be set as well in order to modify the meaning of
9619 the type constraint. */
9620
9621 enum neon_type_mask
9622 {
9623 N_S8 = 0x000001,
9624 N_S16 = 0x000002,
9625 N_S32 = 0x000004,
9626 N_S64 = 0x000008,
9627 N_U8 = 0x000010,
9628 N_U16 = 0x000020,
9629 N_U32 = 0x000040,
9630 N_U64 = 0x000080,
9631 N_I8 = 0x000100,
9632 N_I16 = 0x000200,
9633 N_I32 = 0x000400,
9634 N_I64 = 0x000800,
9635 N_8 = 0x001000,
9636 N_16 = 0x002000,
9637 N_32 = 0x004000,
9638 N_64 = 0x008000,
9639 N_P8 = 0x010000,
9640 N_P16 = 0x020000,
9641 N_F32 = 0x040000,
9642 N_KEY = 0x080000, /* key element (main type specifier). */
9643 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9644 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9645 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9646 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9647 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9648 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9649 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9650 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9651 N_UTYP = 0,
9652 N_MAX_NONSPECIAL = N_F32
9653 };
9654
9655 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9656
9657 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9658 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9659 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9660 #define N_SUF_32 (N_SU_32 | N_F32)
9661 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9662 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9663
9664 /* Pass this as the first type argument to neon_check_type to ignore types
9665 altogether. */
9666 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9667
9668 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9669 specific shape when there are two alternatives. For non-polymorphic shapes,
9670 checking is done during operand parsing, so is not implemented here. */
9671
9672 static enum neon_shape
9673 neon_check_shape (enum neon_shape req)
9674 {
9675 #define RR(X) (inst.operands[(X)].isreg)
9676 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9677 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9678 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9679 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9680
9681 /* Fix missing optional operands. FIXME: we don't know at this point how
9682 many arguments we should have, so this makes the assumption that we have
9683 > 1. This is true of all current Neon opcodes, I think, but may not be
9684 true in the future. */
9685 if (!inst.operands[1].present)
9686 inst.operands[1] = inst.operands[0];
9687
9688 switch (req)
9689 {
9690 case NS_DDD_QQQ:
9691 {
9692 if (RD(0) && RD(1) && RD(2))
9693 return NS_DDD;
9694 else if (RQ(0) && RQ(1) && RQ(2))
9695 return NS_QQQ;
9696 else
9697 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9698 "operands"));
9699 }
9700 break;
9701
9702 case NS_DDI_QQI:
9703 {
9704 if (RD(0) && RD(1) && IM(2))
9705 return NS_DDI;
9706 else if (RQ(0) && RQ(1) && IM(2))
9707 return NS_QQI;
9708 else
9709 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9710 "operands"));
9711 }
9712 break;
9713
9714 case NS_DDDI_QQQI:
9715 {
9716 if (RD(0) && RD(1) && RD(2) && IM(3))
9717 return NS_DDDI;
9718 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9719 return NS_QQQI;
9720 else
9721 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9722 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9723 }
9724 break;
9725
9726 case NS_DDS_QQS:
9727 {
9728 if (RD(0) && RD(1) && SC(2))
9729 return NS_DDS;
9730 else if (RQ(0) && RQ(1) && SC(2))
9731 return NS_QQS;
9732 else
9733 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9734 "operands"));
9735 }
9736 break;
9737
9738 case NS_DD_QQ:
9739 {
9740 if (RD(0) && RD(1))
9741 return NS_DD;
9742 else if (RQ(0) && RQ(1))
9743 return NS_QQ;
9744 else
9745 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9746 }
9747 break;
9748
9749 case NS_DS_QS:
9750 {
9751 if (RD(0) && SC(1))
9752 return NS_DS;
9753 else if (RQ(0) && SC(1))
9754 return NS_QS;
9755 else
9756 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9757 }
9758 break;
9759
9760 case NS_DR_QR:
9761 {
9762 if (RD(0) && RR(1))
9763 return NS_DR;
9764 else if (RQ(0) && RR(1))
9765 return NS_QR;
9766 else
9767 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9768 }
9769 break;
9770
9771 case NS_DI_QI:
9772 {
9773 if (RD(0) && IM(1))
9774 return NS_DI;
9775 else if (RQ(0) && IM(1))
9776 return NS_QI;
9777 else
9778 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9779 }
9780 break;
9781
9782 default:
9783 abort ();
9784 }
9785
9786 return req;
9787 #undef RR
9788 #undef RD
9789 #undef RQ
9790 #undef IM
9791 #undef SC
9792 }
9793
9794 static void
9795 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9796 unsigned *g_size)
9797 {
9798 /* Allow modification to be made to types which are constrained to be
9799 based on the key element, based on bits set alongside N_EQK. */
9800 if ((typebits & N_EQK) != 0)
9801 {
9802 if ((typebits & N_HLF) != 0)
9803 *g_size /= 2;
9804 else if ((typebits & N_DBL) != 0)
9805 *g_size *= 2;
9806 if ((typebits & N_SGN) != 0)
9807 *g_type = NT_signed;
9808 else if ((typebits & N_UNS) != 0)
9809 *g_type = NT_unsigned;
9810 else if ((typebits & N_INT) != 0)
9811 *g_type = NT_integer;
9812 else if ((typebits & N_FLT) != 0)
9813 *g_type = NT_float;
9814 else if ((typebits & N_SIZ) != 0)
9815 *g_type = NT_untyped;
9816 }
9817 }
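
/* Worked example of the modifier bits, for illustration only: with a key
   type of S16,

     enum neon_el_type t = NT_signed;
     unsigned s = 16;
     neon_modify_type_size (N_EQK | N_DBL | N_UNS, &t, &s);

   leaves t == NT_unsigned and s == 32, i.e. the operand is checked as if
   it had been written U32.  */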
9818
9819 /* Return a copy of KEY promoted as directed by the bits set in THISARG.
9820    KEY is the "key" operand type, i.e. the single type specified in a Neon
9821    instruction when it is the only one given.  */
9822
9823 static struct neon_type_el
9824 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9825 {
9826 struct neon_type_el dest = *key;
9827
9828 assert ((thisarg & N_EQK) != 0);
9829
9830 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9831
9832 return dest;
9833 }
9834
9835 /* Convert Neon type and size into compact bitmask representation. */
9836
9837 static enum neon_type_mask
9838 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9839 {
9840 switch (type)
9841 {
9842 case NT_untyped:
9843 switch (size)
9844 {
9845 case 8: return N_8;
9846 case 16: return N_16;
9847 case 32: return N_32;
9848 case 64: return N_64;
9849 default: ;
9850 }
9851 break;
9852
9853 case NT_integer:
9854 switch (size)
9855 {
9856 case 8: return N_I8;
9857 case 16: return N_I16;
9858 case 32: return N_I32;
9859 case 64: return N_I64;
9860 default: ;
9861 }
9862 break;
9863
9864 case NT_float:
9865 if (size == 32)
9866 return N_F32;
9867 break;
9868
9869 case NT_poly:
9870 switch (size)
9871 {
9872 case 8: return N_P8;
9873 case 16: return N_P16;
9874 default: ;
9875 }
9876 break;
9877
9878 case NT_signed:
9879 switch (size)
9880 {
9881 case 8: return N_S8;
9882 case 16: return N_S16;
9883 case 32: return N_S32;
9884 case 64: return N_S64;
9885 default: ;
9886 }
9887 break;
9888
9889 case NT_unsigned:
9890 switch (size)
9891 {
9892 case 8: return N_U8;
9893 case 16: return N_U16;
9894 case 32: return N_U32;
9895 case 64: return N_U64;
9896 default: ;
9897 }
9898 break;
9899
9900 default: ;
9901 }
9902
9903 return N_UTYP;
9904 }
9905
9906 /* Convert compact Neon bitmask type representation to a type and size. Only
9907 handles the case where a single bit is set in the mask. */
9908
9909 static int
9910 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9911 enum neon_type_mask mask)
9912 {
9913 if ((mask & N_EQK) != 0)
9914 return FAIL;
9915
9916 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9917 *size = 8;
9918 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9919 *size = 16;
9920 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9921 *size = 32;
9922 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9923 *size = 64;
9924 else
9925 return FAIL;
9926
9927 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9928 *type = NT_signed;
9929 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9930 *type = NT_unsigned;
9931 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9932 *type = NT_integer;
9933 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9934 *type = NT_untyped;
9935 else if ((mask & (N_P8 | N_P16)) != 0)
9936 *type = NT_poly;
9937 else if ((mask & N_F32) != 0)
9938 *type = NT_float;
9939 else
9940 return FAIL;
9941
9942 return SUCCESS;
9943 }
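
/* Illustrative round trip between the two helpers above:
   type_chk_of_el_type (NT_signed, 16) returns N_S16, and
   el_type_of_type_chk (&t, &s, N_S16) recovers t == NT_signed, s == 16.  */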
9944
9945 /* Modify a bitmask of allowed types. This is only needed for type
9946 relaxation. */
9947
9948 static unsigned
9949 modify_types_allowed (unsigned allowed, unsigned mods)
9950 {
9951 unsigned size;
9952 enum neon_el_type type;
9953 unsigned destmask;
9954 int i;
9955
9956 destmask = 0;
9957
9958 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9959 {
9960 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9961 {
9962 neon_modify_type_size (mods, &type, &size);
9963 destmask |= type_chk_of_el_type (type, size);
9964 }
9965 }
9966
9967 return destmask;
9968 }
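
/* Example of type relaxation, for illustration only:
   modify_types_allowed (N_SU_32, N_EQK | N_DBL) doubles every element size
   in the mask, i.e. S8|S16|S32|U8|U16|U32 becomes S16|S32|S64|U16|U32|U64,
   which is exactly N_SU_16_64.  */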
9969
9970 /* Check type and return type classification.
9971 The manual states (paraphrase): If one datatype is given, it indicates the
9972 type given in:
9973 - the second operand, if there is one
9974 - the operand, if there is no second operand
9975 - the result, if there are no operands.
9976 This isn't quite good enough though, so we use a concept of a "key" datatype
9977 which is set on a per-instruction basis, which is the one which matters when
9978 only one data type is written.
9979 Note: this function has side-effects (e.g. filling in missing operands). All
9980 Neon instructions should call it before performing bit encoding.
9981 */
9982
9983 static struct neon_type_el
9984 neon_check_type (unsigned els, enum neon_shape ns, ...)
9985 {
9986 va_list ap;
9987 unsigned i, pass, key_el = 0;
9988 unsigned types[NEON_MAX_TYPE_ELS];
9989 enum neon_el_type k_type = NT_invtype;
9990 unsigned k_size = -1u;
9991 struct neon_type_el badtype = {NT_invtype, -1};
9992 unsigned key_allowed = 0;
9993
9994   /* The optional register operand in a Neon instruction is always
9995      operand 1.  Fill in the missing operand here, if it was omitted.  */
9996 if (els > 1 && !inst.operands[1].present)
9997 inst.operands[1] = inst.operands[0];
9998
9999 /* Suck up all the varargs. */
10000 va_start (ap, ns);
10001 for (i = 0; i < els; i++)
10002 {
10003 unsigned thisarg = va_arg (ap, unsigned);
10004 if (thisarg == N_IGNORE_TYPE)
10005 {
10006 va_end (ap);
10007 return badtype;
10008 }
10009 types[i] = thisarg;
10010 if ((thisarg & N_KEY) != 0)
10011 key_el = i;
10012 }
10013 va_end (ap);
10014
10015 if (inst.vectype.elems > 0)
10016 for (i = 0; i < els; i++)
10017 if (inst.operands[i].vectype.type != NT_invtype)
10018 {
10019 first_error (_("types specified in both the mnemonic and operands"));
10020 return badtype;
10021 }
10022
10023 /* Duplicate inst.vectype elements here as necessary.
10024 FIXME: No idea if this is exactly the same as the ARM assembler,
10025 particularly when an insn takes one register and one non-register
10026 operand. */
10027 if (inst.vectype.elems == 1 && els > 1)
10028 {
10029 unsigned j;
10030 inst.vectype.elems = els;
10031 inst.vectype.el[key_el] = inst.vectype.el[0];
10032 for (j = 0; j < els; j++)
10033 if (j != key_el)
10034 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10035 types[j]);
10036 }
10037 else if (inst.vectype.elems == 0 && els > 0)
10038 {
10039 unsigned j;
10040 /* No types were given after the mnemonic, so look for types specified
10041 after each operand. We allow some flexibility here; as long as the
10042 "key" operand has a type, we can infer the others. */
10043 for (j = 0; j < els; j++)
10044 if (inst.operands[j].vectype.type != NT_invtype)
10045 inst.vectype.el[j] = inst.operands[j].vectype;
10046
10047 if (inst.operands[key_el].vectype.type != NT_invtype)
10048 {
10049 for (j = 0; j < els; j++)
10050 if (inst.operands[j].vectype.type == NT_invtype)
10051 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10052 types[j]);
10053 }
10054 else
10055 {
10056 first_error (_("operand types can't be inferred"));
10057 return badtype;
10058 }
10059 }
10060 else if (inst.vectype.elems != els)
10061 {
10062 first_error (_("type specifier has the wrong number of parts"));
10063 return badtype;
10064 }
10065
10066 for (pass = 0; pass < 2; pass++)
10067 {
10068 for (i = 0; i < els; i++)
10069 {
10070 unsigned thisarg = types[i];
10071 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10072 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10073 enum neon_el_type g_type = inst.vectype.el[i].type;
10074 unsigned g_size = inst.vectype.el[i].size;
10075
10076 /* Decay more-specific signed & unsigned types to sign-insensitive
10077 integer types if sign-specific variants are unavailable. */
10078 if ((g_type == NT_signed || g_type == NT_unsigned)
10079 && (types_allowed & N_SU_ALL) == 0)
10080 g_type = NT_integer;
10081
10082 /* If only untyped args are allowed, decay any more specific types to
10083 them. Some instructions only care about signs for some element
10084 sizes, so handle that properly. */
10085 if ((g_size == 8 && (types_allowed & N_8) != 0)
10086 || (g_size == 16 && (types_allowed & N_16) != 0)
10087 || (g_size == 32 && (types_allowed & N_32) != 0)
10088 || (g_size == 64 && (types_allowed & N_64) != 0))
10089 g_type = NT_untyped;
10090
10091 if (pass == 0)
10092 {
10093 if ((thisarg & N_KEY) != 0)
10094 {
10095 k_type = g_type;
10096 k_size = g_size;
10097 key_allowed = thisarg & ~N_KEY;
10098 }
10099 }
10100 else
10101 {
10102 if ((thisarg & N_EQK) == 0)
10103 {
10104 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10105
10106 if ((given_type & types_allowed) == 0)
10107 {
10108 first_error (_("bad type in Neon instruction"));
10109 return badtype;
10110 }
10111 }
10112 else
10113 {
10114 enum neon_el_type mod_k_type = k_type;
10115 unsigned mod_k_size = k_size;
10116 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10117 if (g_type != mod_k_type || g_size != mod_k_size)
10118 {
10119 first_error (_("inconsistent types in Neon instruction"));
10120 return badtype;
10121 }
10122 }
10123 }
10124 }
10125 }
10126
10127 return inst.vectype.el[key_el];
10128 }
10129
10130 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10131 ARM mode or Thumb mode, and (in Thumb mode) moving the encoded bit 24 to bit 28. */
10132
10133 static unsigned
10134 neon_dp_fixup (unsigned i)
10135 {
10136 if (thumb_mode)
10137 {
10138 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10139 if (i & (1 << 24))
10140 i |= 1 << 28;
10141
10142 i &= ~(1 << 24);
10143
10144 i |= 0xef000000;
10145 }
10146 else
10147 i |= 0xf2000000;
10148
10149 return i;
10150 }
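
/* Worked example: with I == 0x01000000 (U bit set), ARM mode gives
   0x01000000 | 0xf2000000 == 0xf3000000, while Thumb mode moves the U bit
   to bit 28 and gives 0x10000000 | 0xef000000 == 0xff000000.  */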
10151
10152 /* Turn a size in bits (8, 16, 32, 64) into its log2 minus 3
10153 (0, 1, 2, 3). */
10154
10155 static unsigned
10156 neon_logbits (unsigned x)
10157 {
10158 return ffs (x) - 4;
10159 }
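
/* E.g. neon_logbits (32) == 2, since ffs (32) == 6.  */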
10160
10161 #define LOW4(R) ((R) & 0xf)
10162 #define HI1(R) (((R) >> 4) & 1)
10163
10164 /* Encode insns with bit pattern:
10165
10166 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10167 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10168
10169 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10170 different meaning for some instructions. */
10171
10172 static void
10173 neon_three_same (int isquad, int ubit, int size)
10174 {
10175 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10176 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10177 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10178 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10179 inst.instruction |= LOW4 (inst.operands[2].reg);
10180 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10181 inst.instruction |= (isquad != 0) << 6;
10182 inst.instruction |= (ubit != 0) << 24;
10183 if (size != -1)
10184 inst.instruction |= neon_logbits (size) << 20;
10185
10186 inst.instruction = neon_dp_fixup (inst.instruction);
10187 }
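
/* Sketch of how the fields land for "vadd.i16 d3, d4, d5": Rd == 3 goes to
   bits 15:12, Rn == 4 to bits 19:16, Rm == 5 to bits 3:0, and SIZE == 16
   gives neon_logbits (16) == 1 in bits 21:20.  */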
10188
10189 /* Encode instructions of the form:
10190
10191 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10192 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10193
10194 Don't write size if SIZE == -1. */
10195
10196 static void
10197 neon_two_same (int qbit, int ubit, int size)
10198 {
10199 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10200 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10201 inst.instruction |= LOW4 (inst.operands[1].reg);
10202 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10203 inst.instruction |= (qbit != 0) << 6;
10204 inst.instruction |= (ubit != 0) << 24;
10205
10206 if (size != -1)
10207 inst.instruction |= neon_logbits (size) << 18;
10208
10209 inst.instruction = neon_dp_fixup (inst.instruction);
10210 }
10211
10212 /* Neon instruction encoders, in approximate order of appearance. */
10213
10214 static void
10215 do_neon_dyadic_i_su (void)
10216 {
10217 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10218 struct neon_type_el et = neon_check_type (3, rs,
10219 N_EQK, N_EQK, N_SU_32 | N_KEY);
10220 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10221 }
10222
10223 static void
10224 do_neon_dyadic_i64_su (void)
10225 {
10226 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10227 struct neon_type_el et = neon_check_type (3, rs,
10228 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10229 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10230 }
10231
10232 static void
10233 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10234 unsigned immbits)
10235 {
10236 unsigned size = et.size >> 3;
10237 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10238 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10239 inst.instruction |= LOW4 (inst.operands[1].reg);
10240 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10241 inst.instruction |= (isquad != 0) << 6;
10242 inst.instruction |= immbits << 16;
10243 inst.instruction |= (size >> 3) << 7;
10244 inst.instruction |= (size & 0x7) << 19;
10245 if (write_ubit)
10246 inst.instruction |= (uval != 0) << 24;
10247
10248 inst.instruction = neon_dp_fixup (inst.instruction);
10249 }
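
/* Worked example (left-shift case, as used by do_neon_shl_imm below): for
   "vshl.i32 d0, d1, #5", ET.size == 32 puts the size marker in bit 21 and
   IMMBITS == 5 fills bits 18:16, so the combined imm6 field (bits 21:16)
   reads 0b100101, i.e. 32 + 5.  */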
10250
10251 static void
10252 do_neon_shl_imm (void)
10253 {
10254 if (!inst.operands[2].isreg)
10255 {
10256 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10257 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10258 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10259 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10260 }
10261 else
10262 {
10263 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10264 struct neon_type_el et = neon_check_type (3, rs,
10265 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10266 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10267 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10268 }
10269 }
10270
10271 static void
10272 do_neon_qshl_imm (void)
10273 {
10274 if (!inst.operands[2].isreg)
10275 {
10276 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10277 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10278 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10279 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10280 inst.operands[2].imm);
10281 }
10282 else
10283 {
10284 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10285 struct neon_type_el et = neon_check_type (3, rs,
10286 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10287 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10288 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10289 }
10290 }
10291
10292 static int
10293 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10294 {
10295 /* Handle .I8 and .I64 as pseudo-instructions. */
10296 switch (size)
10297 {
10298 case 8:
10299 /* Unfortunately, this will make everything apart from zero out-of-range.
10300 FIXME: is this the intended semantics? There doesn't seem much point in
10301 accepting .I8 if so. */
10302 immediate |= immediate << 8;
10303 size = 16;
10304 break;
10305 case 64:
10306 /* Similarly, anything other than zero will be replicated in bits [63:32],
10307 which probably isn't what we want if we specified .I64. */
10308 if (immediate != 0)
10309 goto bad_immediate;
10310 size = 32;
10311 break;
10312 default: ;
10313 }
10314
10315 if (immediate == (immediate & 0x000000ff))
10316 {
10317 *immbits = immediate;
10318 return (size == 16) ? 0x9 : 0x1;
10319 }
10320 else if (immediate == (immediate & 0x0000ff00))
10321 {
10322 *immbits = immediate >> 8;
10323 return (size == 16) ? 0xb : 0x3;
10324 }
10325 else if (immediate == (immediate & 0x00ff0000))
10326 {
10327 *immbits = immediate >> 16;
10328 return 0x5;
10329 }
10330 else if (immediate == (immediate & 0xff000000))
10331 {
10332 *immbits = immediate >> 24;
10333 return 0x7;
10334 }
10335
10336 bad_immediate:
10337 first_error (_("immediate value out of range"));
10338 return FAIL;
10339 }
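
/* For example, IMMEDIATE == 0x00ab0000 with SIZE == 32 matches the third
   case above: *IMMBITS is set to 0xab and the cmode returned is 0x5.  */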
10340
10341 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10342 A, B, C, D. */
10343
10344 static int
10345 neon_bits_same_in_bytes (unsigned imm)
10346 {
10347 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10348 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10349 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10350 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10351 }
10352
10353 /* For immediate of above form, return 0bABCD. */
10354
10355 static unsigned
10356 neon_squash_bits (unsigned imm)
10357 {
10358 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10359 | ((imm & 0x01000000) >> 21);
10360 }
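
/* E.g. 0x00ff00ff satisfies neon_bits_same_in_bytes, and
   neon_squash_bits (0x00ff00ff) == 0b0101 (A == 0, B == 1, C == 0, D == 1).  */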
10361
10362 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10363
10364 static unsigned
10365 neon_qfloat_bits (unsigned imm)
10366 {
10367 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10368 }
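
/* E.g. +1.0f has the single-precision bit pattern 0x3f800000, and
   neon_qfloat_bits (0x3f800000) == 0x70, its 8-bit abcdefgh form.  */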
10369
10370 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10371 the instruction. *OP is passed as the initial value of the op field, and
10372 may be set to a different value depending on the constant (e.g.
10373 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10374 MVN). */
10375
10376 static int
10377 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10378 int *op, int size, enum neon_el_type type)
10379 {
10380 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10381 {
10382 if (size != 32 || *op == 1)
10383 return FAIL;
10384 *immbits = neon_qfloat_bits (immlo);
10385 return 0xf;
10386 }
10387 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10388 && neon_bits_same_in_bytes (immlo))
10389 {
10390 /* Check this one first so we don't have to bother with immhi in later
10391 tests. */
10392 if (*op == 1)
10393 return FAIL;
10394 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10395 *op = 1;
10396 return 0xe;
10397 }
10398 else if (immhi != 0)
10399 return FAIL;
10400 else if (immlo == (immlo & 0x000000ff))
10401 {
10402 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10403 immediate. */
10404 if ((size != 8 && size != 16 && size != 32)
10405 || (size == 8 && *op == 1))
10406 return FAIL;
10407 *immbits = immlo;
10408 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10409 }
10410 else if (immlo == (immlo & 0x0000ff00))
10411 {
10412 if (size != 16 && size != 32)
10413 return FAIL;
10414 *immbits = immlo >> 8;
10415 return (size == 16) ? 0xa : 0x2;
10416 }
10417 else if (immlo == (immlo & 0x00ff0000))
10418 {
10419 if (size != 32)
10420 return FAIL;
10421 *immbits = immlo >> 16;
10422 return 0x4;
10423 }
10424 else if (immlo == (immlo & 0xff000000))
10425 {
10426 if (size != 32)
10427 return FAIL;
10428 *immbits = immlo >> 24;
10429 return 0x6;
10430 }
10431 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10432 {
10433 if (size != 32)
10434 return FAIL;
10435 *immbits = (immlo >> 8) & 0xff;
10436 return 0xc;
10437 }
10438 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10439 {
10440 if (size != 32)
10441 return FAIL;
10442 *immbits = (immlo >> 16) & 0xff;
10443 return 0xd;
10444 }
10445
10446 return FAIL;
10447 }
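
/* For instance, IMMLO == 0x5600 with IMMHI == 0, SIZE == 32 and *OP == 0
   takes the (immlo & 0x0000ff00) case: *IMMBITS becomes 0x56 and the cmode
   returned is 0x2 (the "immediate shifted left by 8 bits" form).  */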
10448
10449 /* Write immediate bits [7:0] to the following locations:
10450
10451 |28/24|23 19|18 16|15 4|3 0|
10452 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10453
10454 This function is used by VMOV/VMVN/VORR/VBIC. */
10455
10456 static void
10457 neon_write_immbits (unsigned immbits)
10458 {
10459 inst.instruction |= immbits & 0xf;
10460 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10461 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10462 }
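
/* E.g. IMMBITS == 0xab (binary 10101011) splits as efgh == 1011 -> bits 3:0,
   bcd == 010 -> bits 18:16, and a == 1 -> bit 24 (which neon_dp_fixup moves
   to bit 28 for Thumb).  */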
10463
10464 /* Invert low-order SIZE bits of XHI:XLO. */
10465
10466 static void
10467 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10468 {
10469 unsigned immlo = xlo ? *xlo : 0;
10470 unsigned immhi = xhi ? *xhi : 0;
10471
10472 switch (size)
10473 {
10474 case 8:
10475 immlo = (~immlo) & 0xff;
10476 break;
10477
10478 case 16:
10479 immlo = (~immlo) & 0xffff;
10480 break;
10481
10482 case 64:
10483 immhi = (~immhi) & 0xffffffff;
10484 /* fall through. */
10485
10486 case 32:
10487 immlo = (~immlo) & 0xffffffff;
10488 break;
10489
10490 default:
10491 abort ();
10492 }
10493
10494 if (xlo)
10495 *xlo = immlo;
10496
10497 if (xhi)
10498 *xhi = immhi;
10499 }
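
/* E.g. with SIZE == 16 and *XLO == 0x00ab, *XLO becomes 0xff54; *XHI is only
   touched for SIZE == 64.  */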
10500
10501 static void
10502 do_neon_logic (void)
10503 {
10504 if (inst.operands[2].present && inst.operands[2].isreg)
10505 {
10506 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10507 neon_check_type (3, rs, N_IGNORE_TYPE);
10508 /* U bit and size field were set as part of the bitmask. */
10509 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10510 neon_three_same (rs == NS_QQQ, 0, -1);
10511 }
10512 else
10513 {
10514 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10515 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10516 | N_I64 | N_F32);
10517 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10518 unsigned immbits;
10519 int cmode;
10520
10521 if (et.type == NT_invtype)
10522 return;
10523
10524 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10525
10526 switch (opcode)
10527 {
10528 case N_MNEM_vbic:
10529 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10530 et.size);
10531 break;
10532
10533 case N_MNEM_vorr:
10534 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10535 et.size);
10536 break;
10537
10538 case N_MNEM_vand:
10539 /* Pseudo-instruction for VBIC. */
10540 immbits = inst.operands[1].imm;
10541 neon_invert_size (&immbits, 0, et.size);
10542 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10543 break;
10544
10545 case N_MNEM_vorn:
10546 /* Pseudo-instruction for VORR. */
10547 immbits = inst.operands[1].imm;
10548 neon_invert_size (&immbits, 0, et.size);
10549 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10550 break;
10551
10552 default:
10553 abort ();
10554 }
10555
10556 if (cmode == FAIL)
10557 return;
10558
10559 inst.instruction |= (rs == NS_QI) << 6;
10560 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10561 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10562 inst.instruction |= cmode << 8;
10563 neon_write_immbits (immbits);
10564
10565 inst.instruction = neon_dp_fixup (inst.instruction);
10566 }
10567 }
10568
10569 static void
10570 do_neon_bitfield (void)
10571 {
10572 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10573 neon_check_type (3, rs, N_IGNORE_TYPE);
10574 neon_three_same (rs == NS_QQQ, 0, -1);
10575 }
10576
10577 static void
10578 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10579 unsigned destbits)
10580 {
10581 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10582 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10583 types | N_KEY);
10584 if (et.type == NT_float)
10585 {
10586 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10587 neon_three_same (rs == NS_QQQ, 0, -1);
10588 }
10589 else
10590 {
10591 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10592 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10593 }
10594 }
10595
10596 static void
10597 do_neon_dyadic_if_su (void)
10598 {
10599 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10600 }
10601
10602 static void
10603 do_neon_dyadic_if_su_d (void)
10604 {
10605 /* This version only allows D registers, but that constraint is enforced during
10606 operand parsing so we don't need to do anything extra here. */
10607 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10608 }
10609
10610 static void
10611 do_neon_dyadic_if_i (void)
10612 {
10613 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10614 }
10615
10616 static void
10617 do_neon_dyadic_if_i_d (void)
10618 {
10619 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10620 }
10621
10622 static void
10623 do_neon_addsub_if_i (void)
10624 {
10625 /* The "untyped" case can't happen. Do this to stop the "U" bit from being
10626 affected if we specify unsigned args. */
10627 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10628 }
10629
10630 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10631 result to be:
10632 V<op> A,B (A is operand 0, B is operand 2)
10633 to mean:
10634 V<op> A,B,A
10635 not:
10636 V<op> A,B,B
10637 so handle that case specially. */
10638
10639 static void
10640 neon_exchange_operands (void)
10641 {
10642 void *scratch = alloca (sizeof (inst.operands[0]));
10643 if (inst.operands[1].present)
10644 {
10645 /* Swap operands[1] and operands[2]. */
10646 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10647 inst.operands[1] = inst.operands[2];
10648 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10649 }
10650 else
10651 {
10652 inst.operands[1] = inst.operands[2];
10653 inst.operands[2] = inst.operands[0];
10654 }
10655 }
10656
10657 static void
10658 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10659 {
10660 if (inst.operands[2].isreg)
10661 {
10662 if (invert)
10663 neon_exchange_operands ();
10664 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10665 }
10666 else
10667 {
10668 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10669 struct neon_type_el et = neon_check_type (2, rs,
10670 N_EQK | N_SIZ, immtypes | N_KEY);
10671
10672 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10673 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10674 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10675 inst.instruction |= LOW4 (inst.operands[1].reg);
10676 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10677 inst.instruction |= (rs == NS_QQI) << 6;
10678 inst.instruction |= (et.type == NT_float) << 10;
10679 inst.instruction |= neon_logbits (et.size) << 18;
10680
10681 inst.instruction = neon_dp_fixup (inst.instruction);
10682 }
10683 }
10684
10685 static void
10686 do_neon_cmp (void)
10687 {
10688 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10689 }
10690
10691 static void
10692 do_neon_cmp_inv (void)
10693 {
10694 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10695 }
10696
10697 static void
10698 do_neon_ceq (void)
10699 {
10700 neon_compare (N_IF_32, N_IF_32, FALSE);
10701 }
10702
10703 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10704 scalars, which are encoded in 5 bits, M : Rm.
10705 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10706 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10707 index in M. */
10708
10709 static unsigned
10710 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10711 {
10712 unsigned regno = NEON_SCALAR_REG (scalar);
10713 unsigned elno = NEON_SCALAR_INDEX (scalar);
10714
10715 switch (elsize)
10716 {
10717 case 16:
10718 if (regno > 7 || elno > 3)
10719 goto bad_scalar;
10720 return regno | (elno << 3);
10721
10722 case 32:
10723 if (regno > 15 || elno > 1)
10724 goto bad_scalar;
10725 return regno | (elno << 4);
10726
10727 default:
10728 bad_scalar:
10729 first_error (_("scalar out of range for multiply instruction"));
10730 }
10731
10732 return 0;
10733 }
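
/* E.g. the 16-bit scalar d5[2] encodes as 5 | (2 << 3) == 0x15, and the
   32-bit scalar d10[1] as 10 | (1 << 4) == 0x1a.  */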
10734
10735 /* Encode multiply / multiply-accumulate scalar instructions. */
10736
10737 static void
10738 neon_mul_mac (struct neon_type_el et, int ubit)
10739 {
10740 unsigned scalar;
10741
10742 /* Give a more helpful error message if we have an invalid type. */
10743 if (et.type == NT_invtype)
10744 return;
10745
10746 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10747 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10748 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10749 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10750 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10751 inst.instruction |= LOW4 (scalar);
10752 inst.instruction |= HI1 (scalar) << 5;
10753 inst.instruction |= (et.type == NT_float) << 8;
10754 inst.instruction |= neon_logbits (et.size) << 20;
10755 inst.instruction |= (ubit != 0) << 24;
10756
10757 inst.instruction = neon_dp_fixup (inst.instruction);
10758 }
10759
10760 static void
10761 do_neon_mac_maybe_scalar (void)
10762 {
10763 if (inst.operands[2].isscalar)
10764 {
10765 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10766 struct neon_type_el et = neon_check_type (3, rs,
10767 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10768 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10769 neon_mul_mac (et, rs == NS_QQS);
10770 }
10771 else
10772 do_neon_dyadic_if_i ();
10773 }
10774
10775 static void
10776 do_neon_tst (void)
10777 {
10778 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10779 struct neon_type_el et = neon_check_type (3, rs,
10780 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10781 neon_three_same (rs == NS_QQQ, 0, et.size);
10782 }
10783
10784 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10785 same types as the MAC equivalents. The polynomial type for this instruction
10786 is encoded the same as the integer type. */
10787
10788 static void
10789 do_neon_mul (void)
10790 {
10791 if (inst.operands[2].isscalar)
10792 do_neon_mac_maybe_scalar ();
10793 else
10794 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10795 }
10796
10797 static void
10798 do_neon_qdmulh (void)
10799 {
10800 if (inst.operands[2].isscalar)
10801 {
10802 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10803 struct neon_type_el et = neon_check_type (3, rs,
10804 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10805 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10806 neon_mul_mac (et, rs == NS_QQS);
10807 }
10808 else
10809 {
10810 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10811 struct neon_type_el et = neon_check_type (3, rs,
10812 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10813 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10814 /* The U bit (rounding) comes from bit mask. */
10815 neon_three_same (rs == NS_QQQ, 0, et.size);
10816 }
10817 }
10818
10819 static void
10820 do_neon_fcmp_absolute (void)
10821 {
10822 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10823 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10824 /* Size field comes from bit mask. */
10825 neon_three_same (rs == NS_QQQ, 1, -1);
10826 }
10827
10828 static void
10829 do_neon_fcmp_absolute_inv (void)
10830 {
10831 neon_exchange_operands ();
10832 do_neon_fcmp_absolute ();
10833 }
10834
10835 static void
10836 do_neon_step (void)
10837 {
10838 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10839 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10840 neon_three_same (rs == NS_QQQ, 0, -1);
10841 }
10842
10843 static void
10844 do_neon_abs_neg (void)
10845 {
10846 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10847 struct neon_type_el et = neon_check_type (3, rs,
10848 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10849 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10850 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10851 inst.instruction |= LOW4 (inst.operands[1].reg);
10852 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10853 inst.instruction |= (rs == NS_QQ) << 6;
10854 inst.instruction |= (et.type == NT_float) << 10;
10855 inst.instruction |= neon_logbits (et.size) << 18;
10856
10857 inst.instruction = neon_dp_fixup (inst.instruction);
10858 }
10859
10860 static void
10861 do_neon_sli (void)
10862 {
10863 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10864 struct neon_type_el et = neon_check_type (2, rs,
10865 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10866 int imm = inst.operands[2].imm;
10867 constraint (imm < 0 || (unsigned)imm >= et.size,
10868 _("immediate out of range for insert"));
10869 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10870 }
10871
10872 static void
10873 do_neon_sri (void)
10874 {
10875 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10876 struct neon_type_el et = neon_check_type (2, rs,
10877 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10878 int imm = inst.operands[2].imm;
10879 constraint (imm < 1 || (unsigned)imm > et.size,
10880 _("immediate out of range for insert"));
10881 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10882 }
10883
10884 static void
10885 do_neon_qshlu_imm (void)
10886 {
10887 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10888 struct neon_type_el et = neon_check_type (2, rs,
10889 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10890 int imm = inst.operands[2].imm;
10891 constraint (imm < 0 || (unsigned)imm >= et.size,
10892 _("immediate out of range for shift"));
10893 /* Only encodes the 'U present' variant of the instruction.
10894 In this case, signed types have OP (bit 8) set to 0.
10895 Unsigned types have OP set to 1. */
10896 inst.instruction |= (et.type == NT_unsigned) << 8;
10897 /* The rest of the bits are the same as other immediate shifts. */
10898 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10899 }
10900
10901 static void
10902 do_neon_qmovn (void)
10903 {
10904 struct neon_type_el et = neon_check_type (2, NS_DQ,
10905 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10906 /* Saturating move where operands can be signed or unsigned, and the
10907 destination has the same signedness. */
10908 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10909 if (et.type == NT_unsigned)
10910 inst.instruction |= 0xc0;
10911 else
10912 inst.instruction |= 0x80;
10913 neon_two_same (0, 1, et.size / 2);
10914 }
10915
10916 static void
10917 do_neon_qmovun (void)
10918 {
10919 struct neon_type_el et = neon_check_type (2, NS_DQ,
10920 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10921 /* Saturating move with unsigned results. Operands must be signed. */
10922 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10923 neon_two_same (0, 1, et.size / 2);
10924 }
10925
10926 static void
10927 do_neon_rshift_sat_narrow (void)
10928 {
10929 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10930 or unsigned. If operands are unsigned, results must also be unsigned. */
10931 struct neon_type_el et = neon_check_type (2, NS_DQI,
10932 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10933 int imm = inst.operands[2].imm;
10934 /* This gets the bounds check, size encoding and immediate bits calculation
10935 right. */
10936 et.size /= 2;
10937
10938 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10939 VQMOVN.I<size> <Dd>, <Qm>. */
10940 if (imm == 0)
10941 {
10942 inst.operands[2].present = 0;
10943 inst.instruction = N_MNEM_vqmovn;
10944 do_neon_qmovn ();
10945 return;
10946 }
10947
10948 constraint (imm < 1 || (unsigned)imm > et.size,
10949 _("immediate out of range"));
10950 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10951 }
10952
10953 static void
10954 do_neon_rshift_sat_narrow_u (void)
10955 {
10956 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10957 or unsigned. If operands are unsigned, results must also be unsigned. */
10958 struct neon_type_el et = neon_check_type (2, NS_DQI,
10959 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10960 int imm = inst.operands[2].imm;
10961 /* This gets the bounds check, size encoding and immediate bits calculation
10962 right. */
10963 et.size /= 2;
10964
10965 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10966 VQMOVUN.I<size> <Dd>, <Qm>. */
10967 if (imm == 0)
10968 {
10969 inst.operands[2].present = 0;
10970 inst.instruction = N_MNEM_vqmovun;
10971 do_neon_qmovun ();
10972 return;
10973 }
10974
10975 constraint (imm < 1 || (unsigned)imm > et.size,
10976 _("immediate out of range"));
10977 /* FIXME: The manual is kind of unclear about what value U should have in
10978 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10979 must be 1. */
10980 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10981 }
10982
10983 static void
10984 do_neon_movn (void)
10985 {
10986 struct neon_type_el et = neon_check_type (2, NS_DQ,
10987 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10988 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10989 neon_two_same (0, 1, et.size / 2);
10990 }
10991
10992 static void
10993 do_neon_rshift_narrow (void)
10994 {
10995 struct neon_type_el et = neon_check_type (2, NS_DQI,
10996 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10997 int imm = inst.operands[2].imm;
10998 /* This gets the bounds check, size encoding and immediate bits calculation
10999 right. */
11000 et.size /= 2;
11001
11002 /* If the immediate is zero, this is a pseudo-instruction for
11003 VMOVN.I<size> <Dd>, <Qm>. */
11004 if (imm == 0)
11005 {
11006 inst.operands[2].present = 0;
11007 inst.instruction = N_MNEM_vmovn;
11008 do_neon_movn ();
11009 return;
11010 }
11011
11012 constraint (imm < 1 || (unsigned)imm > et.size,
11013 _("immediate out of range for narrowing operation"));
11014 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
11015 }
11016
11017 static void
11018 do_neon_shll (void)
11019 {
11020 /* FIXME: Type checking when lengthening. */
11021 struct neon_type_el et = neon_check_type (2, NS_QDI,
11022 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
11023 unsigned imm = inst.operands[2].imm;
11024
11025 if (imm == et.size)
11026 {
11027 /* Maximum shift variant. */
11028 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11029 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11030 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11031 inst.instruction |= LOW4 (inst.operands[1].reg);
11032 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11033 inst.instruction |= neon_logbits (et.size) << 18;
11034
11035 inst.instruction = neon_dp_fixup (inst.instruction);
11036 }
11037 else
11038 {
11039 /* A more-specific type check for non-max versions. */
11040 et = neon_check_type (2, NS_QDI,
11041 N_EQK | N_DBL, N_SU_32 | N_KEY);
11042 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11043 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
11044 }
11045 }
11046
11047 /* Check the various types for the VCVT instruction, and return the one that
11048 the current instruction is. */
11049
11050 static int
11051 neon_cvt_flavour (enum neon_shape rs)
11052 {
11053 #define CVT_VAR(C,X,Y) \
11054 et = neon_check_type (2, rs, (X), (Y)); \
11055 if (et.type != NT_invtype) \
11056 { \
11057 inst.error = NULL; \
11058 return (C); \
11059 }
11060 struct neon_type_el et;
11061
11062 CVT_VAR (0, N_S32, N_F32);
11063 CVT_VAR (1, N_U32, N_F32);
11064 CVT_VAR (2, N_F32, N_S32);
11065 CVT_VAR (3, N_F32, N_U32);
11066
11067 return -1;
11068 #undef CVT_VAR
11069 }
11070
11071 static void
11072 do_neon_cvt (void)
11073 {
11074 /* Fixed-point conversion with #0 immediate is encoded as an integer
11075 conversion. */
11076 if (inst.operands[2].present && inst.operands[2].imm != 0)
11077 {
11078 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11079 int flavour = neon_cvt_flavour (rs);
11080 unsigned immbits = 32 - inst.operands[2].imm;
11081 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11082 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11083 if (flavour != -1)
11084 inst.instruction |= enctab[flavour];
11085 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11086 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11087 inst.instruction |= LOW4 (inst.operands[1].reg);
11088 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11089 inst.instruction |= (rs == NS_QQI) << 6;
11090 inst.instruction |= 1 << 21;
11091 inst.instruction |= immbits << 16;
11092 }
11093 else
11094 {
11095 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11096 int flavour = neon_cvt_flavour (rs);
11097 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11098 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11099 if (flavour != -1)
11100 inst.instruction |= enctab[flavour];
11101 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11102 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11103 inst.instruction |= LOW4 (inst.operands[1].reg);
11104 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11105 inst.instruction |= (rs == NS_QQ) << 6;
11106 inst.instruction |= 2 << 18;
11107 }
11108 inst.instruction = neon_dp_fixup (inst.instruction);
11109 }
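
/* Worked example for the fixed-point path above (assuming the usual
   architectural encoding imm6 == 64 - #fbits): "vcvt.s32.f32 d0, d1, #16"
   gives immbits == 32 - 16 == 16, and together with the 1 << 21 term the
   imm6 field (bits 21:16) holds 48.  */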
11110
11111 static void
11112 neon_move_immediate (void)
11113 {
11114 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11115 struct neon_type_el et = neon_check_type (1, rs,
11116 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11117 unsigned immlo, immhi = 0, immbits;
11118 int op, cmode;
11119
11120 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11121 op = (inst.instruction & (1 << 5)) != 0;
11122
11123 immlo = inst.operands[1].imm;
11124 if (inst.operands[1].regisimm)
11125 immhi = inst.operands[1].reg;
11126
11127 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11128 _("immediate has bits set outside the operand size"));
11129
11130 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11131 et.size, et.type)) == FAIL)
11132 {
11133 /* Invert relevant bits only. */
11134 neon_invert_size (&immlo, &immhi, et.size);
11135 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11136 with one or the other; those cases are caught by
11137 neon_cmode_for_move_imm. */
11138 op = !op;
11139 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11140 et.size, et.type)) == FAIL)
11141 {
11142 first_error (_("immediate out of range"));
11143 return;
11144 }
11145 }
11146
11147 inst.instruction &= ~(1 << 5);
11148 inst.instruction |= op << 5;
11149
11150 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11151 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11152 inst.instruction |= (rs == NS_QI) << 6;
11153 inst.instruction |= cmode << 8;
11154
11155 neon_write_immbits (immbits);
11156 }
11157
11158 static void
11159 do_neon_mvn (void)
11160 {
11161 if (inst.operands[1].isreg)
11162 {
11163 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11164
11165 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11166 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11167 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11168 inst.instruction |= LOW4 (inst.operands[1].reg);
11169 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11170 inst.instruction |= (rs == NS_QQ) << 6;
11171 }
11172 else
11173 {
11174 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11175 neon_move_immediate ();
11176 }
11177
11178 inst.instruction = neon_dp_fixup (inst.instruction);
11179 }
11180
11181 /* Encode instructions of form:
11182
11183 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11184 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11185
11186 */
11187
11188 static void
11189 neon_mixed_length (struct neon_type_el et, unsigned size)
11190 {
11191 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11192 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11193 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11194 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11195 inst.instruction |= LOW4 (inst.operands[2].reg);
11196 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11197 inst.instruction |= (et.type == NT_unsigned) << 24;
11198 inst.instruction |= neon_logbits (size) << 20;
11199
11200 inst.instruction = neon_dp_fixup (inst.instruction);
11201 }
11202
11203 static void
11204 do_neon_dyadic_long (void)
11205 {
11206 /* FIXME: Type checking for lengthening op. */
11207 struct neon_type_el et = neon_check_type (3, NS_QDD,
11208 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11209 neon_mixed_length (et, et.size);
11210 }
11211
11212 static void
11213 do_neon_abal (void)
11214 {
11215 struct neon_type_el et = neon_check_type (3, NS_QDD,
11216 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11217 neon_mixed_length (et, et.size);
11218 }
11219
11220 static void
11221 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11222 {
11223 if (inst.operands[2].isscalar)
11224 {
11225 struct neon_type_el et = neon_check_type (3, NS_QDS,
11226 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11227 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11228 neon_mul_mac (et, et.type == NT_unsigned);
11229 }
11230 else
11231 {
11232 struct neon_type_el et = neon_check_type (3, NS_QDD,
11233 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11234 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11235 neon_mixed_length (et, et.size);
11236 }
11237 }
11238
11239 static void
11240 do_neon_mac_maybe_scalar_long (void)
11241 {
11242 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11243 }
11244
11245 static void
11246 do_neon_dyadic_wide (void)
11247 {
11248 struct neon_type_el et = neon_check_type (3, NS_QQD,
11249 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11250 neon_mixed_length (et, et.size);
11251 }
11252
11253 static void
11254 do_neon_dyadic_narrow (void)
11255 {
11256 struct neon_type_el et = neon_check_type (3, NS_QDD,
11257 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11258 neon_mixed_length (et, et.size / 2);
11259 }
11260
11261 static void
11262 do_neon_mul_sat_scalar_long (void)
11263 {
11264 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11265 }
11266
11267 static void
11268 do_neon_vmull (void)
11269 {
11270 if (inst.operands[2].isscalar)
11271 do_neon_mac_maybe_scalar_long ();
11272 else
11273 {
11274 struct neon_type_el et = neon_check_type (3, NS_QDD,
11275 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11276 if (et.type == NT_poly)
11277 inst.instruction = NEON_ENC_POLY (inst.instruction);
11278 else
11279 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11280 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11281 zero. Should be OK as-is. */
11282 neon_mixed_length (et, et.size);
11283 }
11284 }
11285
11286 static void
11287 do_neon_ext (void)
11288 {
11289 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11290 struct neon_type_el et = neon_check_type (3, rs,
11291 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11292 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11293 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11294 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11295 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11296 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11297 inst.instruction |= LOW4 (inst.operands[2].reg);
11298 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11299 inst.instruction |= (rs == NS_QQQI) << 6;
11300 inst.instruction |= imm << 8;
11301
11302 inst.instruction = neon_dp_fixup (inst.instruction);
11303 }
11304
11305 static void
11306 do_neon_rev (void)
11307 {
11308 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11309 struct neon_type_el et = neon_check_type (2, rs,
11310 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11311 unsigned op = (inst.instruction >> 7) & 3;
11312 /* N (width of reversed regions) is encoded as part of the bitmask. We
11313 extract it here to check that the elements to be reversed are smaller.
11314 Otherwise we'd get a reserved instruction. */
11315 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11316 assert (elsize != 0);
11317 constraint (et.size >= elsize,
11318 _("elements must be smaller than reversal region"));
11319 neon_two_same (rs == NS_QQ, 1, et.size);
11320 }
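
/* For example, VREV64 has op == 0, so ELSIZE == 64 and element sizes of 8,
   16 or 32 are all accepted, whereas "vrev16.16" is rejected because its
   elements are not smaller than the 16-bit reversal region.  */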
11321
11322 static void
11323 do_neon_dup (void)
11324 {
11325 if (inst.operands[1].isscalar)
11326 {
11327 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11328 struct neon_type_el et = neon_check_type (2, rs,
11329 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11330 unsigned sizebits = et.size >> 3;
11331 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11332 int logsize = neon_logbits (et.size);
11333 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11334 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11335 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11336 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11337 inst.instruction |= LOW4 (dm);
11338 inst.instruction |= HI1 (dm) << 5;
11339 inst.instruction |= (rs == NS_QS) << 6;
11340 inst.instruction |= x << 17;
11341 inst.instruction |= sizebits << 16;
11342
11343 inst.instruction = neon_dp_fixup (inst.instruction);
11344 }
11345 else
11346 {
11347 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11348 struct neon_type_el et = neon_check_type (1, rs,
11349 N_8 | N_16 | N_32 | N_KEY);
11350 unsigned save_cond = inst.instruction & 0xf0000000;
11351 /* Duplicate ARM register to lanes of vector. */
11352 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11353 switch (et.size)
11354 {
11355 case 8: inst.instruction |= 0x400000; break;
11356 case 16: inst.instruction |= 0x000020; break;
11357 case 32: inst.instruction |= 0x000000; break;
11358 default: break;
11359 }
11360 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11361 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11362 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11363 inst.instruction |= (rs == NS_QR) << 21;
11364 /* The encoding for this instruction is identical for the ARM and Thumb
11365 variants, except for the condition field. */
11366 if (thumb_mode)
11367 inst.instruction |= 0xe0000000;
11368 else
11369 inst.instruction |= save_cond;
11370 }
11371 }
11372
11373 /* VMOV has particularly many variations. It can be one of:
11374 0. VMOV<c><q> <Qd>, <Qm>
11375 1. VMOV<c><q> <Dd>, <Dm>
11376 (Register operations, which are VORR with Rm = Rn.)
11377 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11378 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11379 (Immediate loads.)
11380 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11381 (ARM register to scalar.)
11382 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11383 (Two ARM registers to vector.)
11384 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11385 (Scalar to ARM register.)
11386 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11387 (Vector to two ARM registers.)
11388
11389 We should have just enough information to be able to disambiguate most of
11390 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11391 registers" cases. For these, abuse the .regisimm operand field to signify a
11392 Neon register.
11393
11394 All the encoded bits are hardcoded by this function.
11395
11396 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
11397 Cases 5, 7 may be used with VFPv2 and above.
11398
11399 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11400 can specify a type where it doesn't make sense, and it is ignored).
11401 */
11402
11403 static void
11404 do_neon_mov (void)
11405 {
11406 int nargs = inst.operands[0].present + inst.operands[1].present
11407 + inst.operands[2].present;
11408 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11409 const char *vfp_vers = "selected FPU does not support instruction";
11410
11411 switch (nargs)
11412 {
11413 case 2:
11414 /* Cases 0, 1, 2, 3, 4, 6. */
11415 if (inst.operands[1].isscalar)
11416 {
11417 /* Case 6. */
11418 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11419 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11420 unsigned logsize = neon_logbits (et.size);
11421 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11422 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11423 unsigned abcdebits = 0;
11424
11425 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11426 _(vfp_vers));
11427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11428 && et.size != 32, _(vfp_vers));
11429 constraint (et.type == NT_invtype, _("bad type for scalar"));
11430 constraint (x >= 64 / et.size, _("scalar index out of range"));
11431
11432 switch (et.size)
11433 {
11434 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11435 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11436 case 32: abcdebits = 0x00; break;
11437 default: ;
11438 }
11439
11440 abcdebits |= x << logsize;
11441 inst.instruction = save_cond;
11442 inst.instruction |= 0xe100b10;
11443 inst.instruction |= LOW4 (dn) << 16;
11444 inst.instruction |= HI1 (dn) << 7;
11445 inst.instruction |= inst.operands[0].reg << 12;
11446 inst.instruction |= (abcdebits & 3) << 5;
11447 inst.instruction |= (abcdebits >> 2) << 21;
11448 }
11449 else if (inst.operands[1].isreg)
11450 {
11451 /* Cases 0, 1, 4. */
11452 if (inst.operands[0].isscalar)
11453 {
11454 /* Case 4. */
11455 unsigned bcdebits = 0;
11456 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11457 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11458 int logsize = neon_logbits (et.size);
11459 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11460 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11461
11462 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11463 _(vfp_vers));
11464 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11465 && et.size != 32, _(vfp_vers));
11466 constraint (et.type == NT_invtype, _("bad type for scalar"));
11467 constraint (x >= 64 / et.size, _("scalar index out of range"));
11468
11469 switch (et.size)
11470 {
11471 case 8: bcdebits = 0x8; break;
11472 case 16: bcdebits = 0x1; break;
11473 case 32: bcdebits = 0x0; break;
11474 default: ;
11475 }
11476
11477 bcdebits |= x << logsize;
11478 inst.instruction = save_cond;
11479 inst.instruction |= 0xe000b10;
11480 inst.instruction |= LOW4 (dn) << 16;
11481 inst.instruction |= HI1 (dn) << 7;
11482 inst.instruction |= inst.operands[1].reg << 12;
11483 inst.instruction |= (bcdebits & 3) << 5;
11484 inst.instruction |= (bcdebits >> 2) << 21;
11485 }
11486 else
11487 {
11488 /* Cases 0, 1. */
11489 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11490 /* The architecture manual I have doesn't explicitly state which
11491 value the U bit should have for register->register moves, but
11492 the equivalent VORR instruction has U = 0, so do that. */
11493 inst.instruction = 0x0200110;
11494 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11495 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11496 inst.instruction |= LOW4 (inst.operands[1].reg);
11497 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11498 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11499 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11500 inst.instruction |= (rs == NS_QQ) << 6;
11501
11502 inst.instruction = neon_dp_fixup (inst.instruction);
11503 }
11504 }
11505 else
11506 {
11507 /* Cases 2, 3. */
11508 inst.instruction = 0x0800010;
11509 neon_move_immediate ();
11510 inst.instruction = neon_dp_fixup (inst.instruction);
11511 }
11512 break;
11513
11514 case 3:
11515 /* Cases 5, 7. */
11516 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
11517 _(vfp_vers));
11518
11519 if (inst.operands[0].regisimm)
11520 {
11521 /* Case 5. */
11522 inst.instruction = save_cond;
11523 inst.instruction |= 0xc400b10;
11524 inst.instruction |= LOW4 (inst.operands[0].reg);
11525 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11526 inst.instruction |= inst.operands[1].reg << 12;
11527 inst.instruction |= inst.operands[2].reg << 16;
11528 }
11529 else
11530 {
11531 /* Case 7. */
11532 inst.instruction = save_cond;
11533 inst.instruction |= 0xc500b10;
11534 inst.instruction |= inst.operands[0].reg << 12;
11535 inst.instruction |= inst.operands[1].reg << 16;
11536 inst.instruction |= LOW4 (inst.operands[2].reg);
11537 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11538 }
11539 break;
11540
11541 default:
11542 abort ();
11543 }
11544 }
11545
11546 static void
11547 do_neon_rshift_round_imm (void)
11548 {
11549 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11550 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11551 int imm = inst.operands[2].imm;
11552
11553 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11554 if (imm == 0)
11555 {
11556 inst.operands[2].present = 0;
11557 do_neon_mov ();
11558 return;
11559 }
11560
11561 constraint (imm < 1 || (unsigned)imm > et.size,
11562 _("immediate out of range for shift"));
11563 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11564 et.size - imm);
11565 }
11566
11567 static void
11568 do_neon_movl (void)
11569 {
11570 struct neon_type_el et = neon_check_type (2, NS_QD,
11571 N_EQK | N_DBL, N_SU_32 | N_KEY);
11572 unsigned sizebits = et.size >> 3;
11573 inst.instruction |= sizebits << 19;
11574 neon_two_same (0, et.type == NT_unsigned, -1);
11575 }
11576
11577 static void
11578 do_neon_trn (void)
11579 {
11580 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11581 struct neon_type_el et = neon_check_type (2, rs,
11582 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11583 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11584 neon_two_same (rs == NS_QQ, 1, et.size);
11585 }
11586
11587 static void
11588 do_neon_zip_uzp (void)
11589 {
11590 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11591 struct neon_type_el et = neon_check_type (2, rs,
11592 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11593 if (rs == NS_DD && et.size == 32)
11594 {
11595 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11596 inst.instruction = N_MNEM_vtrn;
11597 do_neon_trn ();
11598 return;
11599 }
11600 neon_two_same (rs == NS_QQ, 1, et.size);
11601 }
11602
11603 static void
11604 do_neon_sat_abs_neg (void)
11605 {
11606 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11607 struct neon_type_el et = neon_check_type (2, rs,
11608 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11609 neon_two_same (rs == NS_QQ, 1, et.size);
11610 }
11611
11612 static void
11613 do_neon_pair_long (void)
11614 {
11615 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11616 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11617 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
11618 inst.instruction |= (et.type == NT_unsigned) << 7;
11619 neon_two_same (rs == NS_QQ, 1, et.size);
11620 }
11621
11622 static void
11623 do_neon_recip_est (void)
11624 {
11625 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11626 struct neon_type_el et = neon_check_type (2, rs,
11627 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11628 inst.instruction |= (et.type == NT_float) << 8;
11629 neon_two_same (rs == NS_QQ, 1, et.size);
11630 }
11631
11632 static void
11633 do_neon_cls (void)
11634 {
11635 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11636 struct neon_type_el et = neon_check_type (2, rs,
11637 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11638 neon_two_same (rs == NS_QQ, 1, et.size);
11639 }
11640
11641 static void
11642 do_neon_clz (void)
11643 {
11644 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11645 struct neon_type_el et = neon_check_type (2, rs,
11646 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11647 neon_two_same (rs == NS_QQ, 1, et.size);
11648 }
11649
11650 static void
11651 do_neon_cnt (void)
11652 {
11653 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11654 struct neon_type_el et = neon_check_type (2, rs,
11655 N_EQK | N_INT, N_8 | N_KEY);
11656 neon_two_same (rs == NS_QQ, 1, et.size);
11657 }
11658
11659 static void
11660 do_neon_swp (void)
11661 {
11662 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11663 neon_two_same (rs == NS_QQ, 1, -1);
11664 }
11665
11666 static void
11667 do_neon_tbl_tbx (void)
11668 {
11669 unsigned listlenbits;
11670 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11671
11672 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11673 {
11674 first_error (_("bad list length for table lookup"));
11675 return;
11676 }
11677
11678 listlenbits = inst.operands[1].imm - 1;
11679 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11680 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11681 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11682 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11683 inst.instruction |= LOW4 (inst.operands[2].reg);
11684 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11685 inst.instruction |= listlenbits << 8;
11686
11687 inst.instruction = neon_dp_fixup (inst.instruction);
11688 }
11689
11690 static void
11691 do_neon_ldm_stm (void)
11692 {
11693 /* P, U and L bits are part of bitmask. */
11694 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11695 unsigned offsetbits = inst.operands[1].imm * 2;
11696
11697 constraint (is_dbmode && !inst.operands[0].writeback,
11698 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11699
11700 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11701 _("register list must contain at least 1 and at most 16 "
11702 "registers"));
11703
11704 inst.instruction |= inst.operands[0].reg << 16;
11705 inst.instruction |= inst.operands[0].writeback << 21;
11706 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11707 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11708
11709 inst.instruction |= offsetbits;
11710
11711 if (thumb_mode)
11712 inst.instruction |= 0xe0000000;
11713 }
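
/* Offset example: each D register occupies two words, so a list of four
   registers (e.g. "vldmia r0!, {d0-d3}") has inst.operands[1].imm == 4 and
   OFFSETBITS == 8.  */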
11714
11715 static void
11716 do_neon_ldr_str (void)
11717 {
11718 unsigned offsetbits;
11719 int offset_up = 1;
11720 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11721
11722 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11723 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11724
11725 constraint (inst.reloc.pc_rel && !is_ldr,
11726 _("PC-relative addressing unavailable with VSTR"));
11727
11728 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11729 _("Immediate value must be a constant"));
11730
11731 if (inst.reloc.exp.X_add_number < 0)
11732 {
11733 offset_up = 0;
11734 offsetbits = -inst.reloc.exp.X_add_number / 4;
11735 }
11736 else
11737 offsetbits = inst.reloc.exp.X_add_number / 4;
11738
11739 /* FIXME: Does this catch everything? */
11740 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11741 || inst.operands[1].postind || inst.operands[1].writeback
11742 || inst.operands[1].immisreg || inst.operands[1].shifted,
11743 BAD_ADDR_MODE);
11744 constraint ((inst.operands[1].imm & 3) != 0,
11745 _("Offset must be a multiple of 4"));
11746 constraint (offsetbits != (offsetbits & 0xff),
11747 _("Immediate offset out of range"));
11748
11749 inst.instruction |= inst.operands[1].reg << 16;
11750 inst.instruction |= offsetbits & 0xff;
11751 inst.instruction |= offset_up << 23;
11752
11753 if (thumb_mode)
11754 inst.instruction |= 0xe0000000;
11755
11756 if (inst.reloc.pc_rel)
11757 {
11758 if (thumb_mode)
11759 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11760 else
11761 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11762 }
11763 else
11764 inst.reloc.type = BFD_RELOC_UNUSED;
11765 }
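
/* E.g. "vldr d0, [r1, #-16]" ends up with OFFSETBITS == 4 and
   OFFSET_UP == 0: the byte offset is divided by 4 and the sign is carried
   in bit 23.  */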
11766
11767 /* "interleave" version also handles non-interleaving register VLD1/VST1
11768 instructions. */
11769
11770 static void
11771 do_neon_ld_st_interleave (void)
11772 {
11773 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11774 N_8 | N_16 | N_32 | N_64);
11775 unsigned alignbits = 0;
11776 unsigned idx;
11777 /* The bits in this table go:
11778 0: register stride of one (0) or two (1)
11779 1,2: register list length minus one (for lengths 1, 2, 3, 4).
11780 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11781 We use -1 for invalid entries. */
11782 const int typetable[] =
11783 {
11784 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11785 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11786 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11787 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11788 };
11789 int typebits;
11790
11791 if (et.type == NT_invtype)
11792 return;
11793
11794 if (inst.operands[1].immisalign)
11795 switch (inst.operands[1].imm >> 8)
11796 {
11797 case 64: alignbits = 1; break;
11798 case 128:
11799 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11800 goto bad_alignment;
11801 alignbits = 2;
11802 break;
11803 case 256:
11804 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11805 goto bad_alignment;
11806 alignbits = 3;
11807 break;
11808 default:
11809 bad_alignment:
11810 first_error (_("bad alignment"));
11811 return;
11812 }
11813
11814 inst.instruction |= alignbits << 4;
11815 inst.instruction |= neon_logbits (et.size) << 6;
11816
11817 /* Bits [4:6] of the immediate in a list specifier encode register stride
11818 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11819 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11820 up the right value for "type" in a table based on this value and the given
11821 list style, then stick it back. */
11822 idx = ((inst.operands[0].imm >> 4) & 7)
11823 | (((inst.instruction >> 8) & 3) << 3);
11824
11825 typebits = typetable[idx];
11826
11827 constraint (typebits == -1, _("bad list type for instruction"));
11828
11829 inst.instruction &= ~0xf00;
11830 inst.instruction |= typebits << 8;
11831 }
11832
11833 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11834 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11835 otherwise. The variable arguments are a list of pairs of legal (size, align)
11836 values, terminated with -1. */
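/* For example, the VLD1/VST1 case in do_neon_ld_st_lane below passes the
   pairs (16, 16) and (32, 32), so an explicit ":16" alignment is accepted
   only for 16-bit elements and ":32" only for 32-bit elements; anything
   else is rejected with "unsupported alignment for instruction". */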
11837
11838 static int
11839 neon_alignment_bit (int size, int align, int *do_align, ...)
11840 {
11841 va_list ap;
11842 int result = FAIL, thissize, thisalign;
11843
11844 if (!inst.operands[1].immisalign)
11845 {
11846 *do_align = 0;
11847 return SUCCESS;
11848 }
11849
11850 va_start (ap, do_align);
11851
11852 do
11853 {
11854 thissize = va_arg (ap, int);
11855 if (thissize == -1)
11856 break;
11857 thisalign = va_arg (ap, int);
11858
11859 if (size == thissize && align == thisalign)
11860 result = SUCCESS;
11861 }
11862 while (result != SUCCESS);
11863
11864 va_end (ap);
11865
11866 if (result == SUCCESS)
11867 *do_align = 1;
11868 else
11869 first_error (_("unsupported alignment for instruction"));
11870
11871 return result;
11872 }
11873
11874 static void
11875 do_neon_ld_st_lane (void)
11876 {
11877 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11878 int align_good, do_align = 0;
11879 int logsize = neon_logbits (et.size);
11880 int align = inst.operands[1].imm >> 8;
11881 int n = (inst.instruction >> 8) & 3;
11882 int max_el = 64 / et.size;
11883
11884 if (et.type == NT_invtype)
11885 return;
11886
11887 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11888 _("bad list length"));
11889 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11890 _("scalar index out of range"));
11891 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11892 && et.size == 8,
11893 _("stride of 2 unavailable when element size is 8"));
11894
11895 switch (n)
11896 {
11897 case 0: /* VLD1 / VST1. */
11898 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11899 32, 32, -1);
11900 if (align_good == FAIL)
11901 return;
11902 if (do_align)
11903 {
11904 unsigned alignbits = 0;
11905 switch (et.size)
11906 {
11907 case 16: alignbits = 0x1; break;
11908 case 32: alignbits = 0x3; break;
11909 default: ;
11910 }
11911 inst.instruction |= alignbits << 4;
11912 }
11913 break;
11914
11915 case 1: /* VLD2 / VST2. */
11916 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11917 32, 64, -1);
11918 if (align_good == FAIL)
11919 return;
11920 if (do_align)
11921 inst.instruction |= 1 << 4;
11922 break;
11923
11924 case 2: /* VLD3 / VST3. */
11925 constraint (inst.operands[1].immisalign,
11926 _("can't use alignment with this instruction"));
11927 break;
11928
11929 case 3: /* VLD4 / VST4. */
11930 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11931 16, 64, 32, 64, 32, 128, -1);
11932 if (align_good == FAIL)
11933 return;
11934 if (do_align)
11935 {
11936 unsigned alignbits = 0;
11937 switch (et.size)
11938 {
11939 case 8: alignbits = 0x1; break;
11940 case 16: alignbits = 0x1; break;
11941 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11942 default: ;
11943 }
11944 inst.instruction |= alignbits << 4;
11945 }
11946 break;
11947
11948 default: ;
11949 }
11950
11951 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11952 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11953 inst.instruction |= 1 << (4 + logsize);
11954
11955 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11956 inst.instruction |= logsize << 10;
11957 }
11958
11959 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11960
11961 static void
11962 do_neon_ld_dup (void)
11963 {
11964 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11965 int align_good, do_align = 0;
11966
11967 if (et.type == NT_invtype)
11968 return;
11969
11970 switch ((inst.instruction >> 8) & 3)
11971 {
11972 case 0: /* VLD1. */
11973 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11974 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11975 &do_align, 16, 16, 32, 32, -1);
11976 if (align_good == FAIL)
11977 return;
11978 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11979 {
11980 case 1: break;
11981 case 2: inst.instruction |= 1 << 5; break;
11982 default: first_error (_("bad list length")); return;
11983 }
11984 inst.instruction |= neon_logbits (et.size) << 6;
11985 break;
11986
11987 case 1: /* VLD2. */
11988 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11989 &do_align, 8, 16, 16, 32, 32, 64, -1);
11990 if (align_good == FAIL)
11991 return;
11992 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11993 _("bad list length"));
11994 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11995 inst.instruction |= 1 << 5;
11996 inst.instruction |= neon_logbits (et.size) << 6;
11997 break;
11998
11999 case 2: /* VLD3. */
12000 constraint (inst.operands[1].immisalign,
12001 _("can't use alignment with this instruction"));
12002 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
12003 _("bad list length"));
12004 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12005 inst.instruction |= 1 << 5;
12006 inst.instruction |= neon_logbits (et.size) << 6;
12007 break;
12008
12009 case 3: /* VLD4. */
12010 {
12011 int align = inst.operands[1].imm >> 8;
12012 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
12013 16, 64, 32, 64, 32, 128, -1);
12014 if (align_good == FAIL)
12015 return;
12016 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
12017 _("bad list length"));
12018 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12019 inst.instruction |= 1 << 5;
12020 if (et.size == 32 && align == 128)
12021 inst.instruction |= 0x3 << 6;
12022 else
12023 inst.instruction |= neon_logbits (et.size) << 6;
12024 }
12025 break;
12026
12027 default: ;
12028 }
12029
12030 inst.instruction |= do_align << 4;
12031 }
12032
12033 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12034 apart from bits [11:4]). */
12035
12036 static void
12037 do_neon_ldx_stx (void)
12038 {
12039 switch (NEON_LANE (inst.operands[0].imm))
12040 {
12041 case NEON_INTERLEAVE_LANES:
12042 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
12043 do_neon_ld_st_interleave ();
12044 break;
12045
12046 case NEON_ALL_LANES:
12047 inst.instruction = NEON_ENC_DUP (inst.instruction);
12048 do_neon_ld_dup ();
12049 break;
12050
12051 default:
12052 inst.instruction = NEON_ENC_LANE (inst.instruction);
12053 do_neon_ld_st_lane ();
12054 }
12055
12056 /* L bit comes from bit mask. */
12057 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12058 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12059 inst.instruction |= inst.operands[1].reg << 16;
12060
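/* The Rm field in bits [3:0] selects the post-addressing variant (this
   follows the architecture's VLDn/VSTn encoding): an ordinary register
   number requests post-indexing by that register, 0xd (r13) requests
   writeback by the number of bytes transferred, and 0xf (r15) means no
   writeback. */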
12061 if (inst.operands[1].postind)
12062 {
12063 int postreg = inst.operands[1].imm & 0xf;
12064 constraint (!inst.operands[1].immisreg,
12065 _("post-index must be a register"));
12066 constraint (postreg == 0xd || postreg == 0xf,
12067 _("bad register for post-index"));
12068 inst.instruction |= postreg;
12069 }
12070 else if (inst.operands[1].writeback)
12071 {
12072 inst.instruction |= 0xd;
12073 }
12074 else
12075 inst.instruction |= 0xf;
12076
12077 if (thumb_mode)
12078 inst.instruction |= 0xf9000000;
12079 else
12080 inst.instruction |= 0xf4000000;
12081 }
12082
12083 \f
12084 /* Overall per-instruction processing. */
12085
12086 /* We need to be able to fix up arbitrary expressions in some statements.
12087 This is so that we can handle symbols that are an arbitrary distance from
12088 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12089 which returns part of an address in a form which will be valid for
12090 a data instruction. We do this by pushing the expression into a symbol
12091 in the expr_section, and creating a fix for that. */
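/* For instance, an operand whose expression is (sym1 - sym2) >> 2 has an
   X_op (O_right_shift) that is not one of the simple cases handled below,
   so it is wrapped into an expression symbol with make_expr_symbol and the
   fix is created against that symbol instead. */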
12092
12093 static void
12094 fix_new_arm (fragS * frag,
12095 int where,
12096 short int size,
12097 expressionS * exp,
12098 int pc_rel,
12099 int reloc)
12100 {
12101 fixS * new_fix;
12102
12103 switch (exp->X_op)
12104 {
12105 case O_constant:
12106 case O_symbol:
12107 case O_add:
12108 case O_subtract:
12109 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12110 break;
12111
12112 default:
12113 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12114 pc_rel, reloc);
12115 break;
12116 }
12117
12118 /* Mark whether the fix is to a THUMB instruction, or an ARM
12119 instruction. */
12120 new_fix->tc_fix_data = thumb_mode;
12121 }
12122
12123 /* Create a frag for an instruction requiring relaxation. */
12124 static void
12125 output_relax_insn (void)
12126 {
12127 char * to;
12128 symbolS *sym;
12129 int offset;
12130
12131 #ifdef OBJ_ELF
12132 /* The size of the instruction is unknown, so tie the debug info to the
12133 start of the instruction. */
12134 dwarf2_emit_insn (0);
12135 #endif
12136
12137 switch (inst.reloc.exp.X_op)
12138 {
12139 case O_symbol:
12140 sym = inst.reloc.exp.X_add_symbol;
12141 offset = inst.reloc.exp.X_add_number;
12142 break;
12143 case O_constant:
12144 sym = NULL;
12145 offset = inst.reloc.exp.X_add_number;
12146 break;
12147 default:
12148 sym = make_expr_symbol (&inst.reloc.exp);
12149 offset = 0;
12150 break;
12151 }
12152 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12153 inst.relax, sym, offset, NULL/*offset, opcode*/);
12154 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12155 }
12156
12157 /* Write a 32-bit thumb instruction to buf. */
12158 static void
12159 put_thumb32_insn (char * buf, unsigned long insn)
12160 {
12161 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12162 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12163 }
12164
12165 static void
12166 output_inst (const char * str)
12167 {
12168 char * to = NULL;
12169
12170 if (inst.error)
12171 {
12172 as_bad ("%s -- `%s'", inst.error, str);
12173 return;
12174 }
12175 if (inst.relax) {
12176 output_relax_insn();
12177 return;
12178 }
12179 if (inst.size == 0)
12180 return;
12181
12182 to = frag_more (inst.size);
12183
12184 if (thumb_mode && (inst.size > THUMB_SIZE))
12185 {
12186 assert (inst.size == (2 * THUMB_SIZE));
12187 put_thumb32_insn (to, inst.instruction);
12188 }
12189 else if (inst.size > INSN_SIZE)
12190 {
12191 assert (inst.size == (2 * INSN_SIZE));
12192 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12193 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12194 }
12195 else
12196 md_number_to_chars (to, inst.instruction, inst.size);
12197
12198 if (inst.reloc.type != BFD_RELOC_UNUSED)
12199 fix_new_arm (frag_now, to - frag_now->fr_literal,
12200 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12201 inst.reloc.type);
12202
12203 #ifdef OBJ_ELF
12204 dwarf2_emit_insn (inst.size);
12205 #endif
12206 }
12207
12208 /* Tag values used in struct asm_opcode's tag field. */
12209 enum opcode_tag
12210 {
12211 OT_unconditional, /* Instruction cannot be conditionalized.
12212 The ARM condition field is still 0xE. */
12213 OT_unconditionalF, /* Instruction cannot be conditionalized
12214 and carries 0xF in its ARM condition field. */
12215 OT_csuffix, /* Instruction takes a conditional suffix. */
12216 OT_cinfix3, /* Instruction takes a conditional infix,
12217 beginning at character index 3. (In
12218 unified mode, it becomes a suffix.) */
12219 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
12220 tsts, cmps, cmns, and teqs. */
12221 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12222 character index 3, even in unified mode. Used for
12223 legacy instructions where suffix and infix forms
12224 may be ambiguous. */
12225 OT_csuf_or_in3, /* Instruction takes either a conditional
12226 suffix or an infix at character index 3. */
12227 OT_odd_infix_unc, /* This is the unconditional variant of an
12228 instruction that takes a conditional infix
12229 at an unusual position. In unified mode,
12230 this variant will accept a suffix. */
12231 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12232 are the conditional variants of instructions that
12233 take conditional infixes in unusual positions.
12234 The infix appears at character index
12235 (tag - OT_odd_infix_0). These are not accepted
12236 in unified mode. */
12237 };
12238
12239 /* Subroutine of md_assemble, responsible for looking up the primary
12240 opcode from the mnemonic the user wrote. STR points to the
12241 beginning of the mnemonic.
12242
12243 This is not simply a hash table lookup, because of conditional
12244 variants. Most instructions have conditional variants, which are
12245 expressed with a _conditional affix_ to the mnemonic. If we were
12246 to encode each conditional variant as a literal string in the opcode
12247 table, it would have approximately 20,000 entries.
12248
12249 Most mnemonics take this affix as a suffix, and in unified syntax,
12250 'most' is upgraded to 'all'. However, in the divided syntax, some
12251 instructions take the affix as an infix, notably the s-variants of
12252 the arithmetic instructions. Of those instructions, all but six
12253 have the infix appear after the third character of the mnemonic.
12254
12255 Accordingly, the algorithm for looking up primary opcodes given
12256 an identifier is:
12257
12258 1. Look up the identifier in the opcode table.
12259 If we find a match, go to step U.
12260
12261 2. Look up the last two characters of the identifier in the
12262 conditions table. If we find a match, look up the first N-2
12263 characters of the identifier in the opcode table. If we
12264 find a match, go to step CE.
12265
12266 3. Look up the fourth and fifth characters of the identifier in
12267 the conditions table. If we find a match, extract those
12268 characters from the identifier, and look up the remaining
12269 characters in the opcode table. If we find a match, go
12270 to step CM.
12271
12272 4. Fail.
12273
12274 U. Examine the tag field of the opcode structure, in case this is
12275 one of the six instructions with its conditional infix in an
12276 unusual place. If it is, the tag tells us where to find the
12277 infix; look it up in the conditions table and set inst.cond
12278 accordingly. Otherwise, this is an unconditional instruction.
12279 Again set inst.cond accordingly. Return the opcode structure.
12280
12281 CE. Examine the tag field to make sure this is an instruction that
12282 should receive a conditional suffix. If it is not, fail.
12283 Otherwise, set inst.cond from the suffix we already looked up,
12284 and return the opcode structure.
12285
12286 CM. Examine the tag field to make sure this is an instruction that
12287 should receive a conditional infix after the third character.
12288 If it is not, fail. Otherwise, undo the edits to the current
12289 line of input and proceed as for case CE. */
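/* Two informal examples using mnemonics from the table further down:
   "addeq" misses in step 1, but "eq" is a condition and "add" is an opcode,
   so step 2 succeeds and case CE applies. In divided syntax, "subges" also
   misses in steps 1 and 2 ("es" is not a condition), but step 3 finds "ge"
   in the fourth and fifth characters and "subs" in the opcode table, so
   case CM applies. */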
12290
12291 static const struct asm_opcode *
12292 opcode_lookup (char **str)
12293 {
12294 char *end, *base;
12295 char *affix;
12296 const struct asm_opcode *opcode;
12297 const struct asm_cond *cond;
12298 char save[2];
12299
12300 /* Scan up to the end of the mnemonic, which must end in white space,
12301 '.' (in unified mode only), or end of string. */
12302 for (base = end = *str; *end != '\0'; end++)
12303 if (*end == ' ' || (unified_syntax && *end == '.'))
12304 break;
12305
12306 if (end == base)
12307 return 0;
12308
12309 /* Handle a possible width suffix and/or Neon type suffix. */
12310 if (end[0] == '.')
12311 {
12312 int offset = 2;
12313
12314 if (end[1] == 'w')
12315 inst.size_req = 4;
12316 else if (end[1] == 'n')
12317 inst.size_req = 2;
12318 else
12319 offset = 0;
12320
12321 inst.vectype.elems = 0;
12322
12323 *str = end + offset;
12324
12325 if (end[offset] == '.')
12326 {
12327 /* See if we have a Neon type suffix. */
12328 if (parse_neon_type (&inst.vectype, str) == FAIL)
12329 return 0;
12330 }
12331 else if (end[offset] != '\0' && end[offset] != ' ')
12332 return 0;
12333 }
12334 else
12335 *str = end;
12336
12337 /* Look for unaffixed or special-case affixed mnemonic. */
12338 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12339 if (opcode)
12340 {
12341 /* step U */
12342 if (opcode->tag < OT_odd_infix_0)
12343 {
12344 inst.cond = COND_ALWAYS;
12345 return opcode;
12346 }
12347
12348 if (unified_syntax)
12349 as_warn (_("conditional infixes are deprecated in unified syntax"));
12350 affix = base + (opcode->tag - OT_odd_infix_0);
12351 cond = hash_find_n (arm_cond_hsh, affix, 2);
12352 assert (cond);
12353
12354 inst.cond = cond->value;
12355 return opcode;
12356 }
12357
12358 /* Cannot have a conditional suffix on a mnemonic of less than three
12359 characters (a two-character suffix plus at least one base character). */
12360 if (end - base < 3)
12361 return 0;
12362
12363 /* Look for suffixed mnemonic. */
12364 affix = end - 2;
12365 cond = hash_find_n (arm_cond_hsh, affix, 2);
12366 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12367 if (opcode && cond)
12368 {
12369 /* step CE */
12370 switch (opcode->tag)
12371 {
12372 case OT_cinfix3_legacy:
12373 /* Ignore conditional suffixes matched on infix only mnemonics. */
12374 break;
12375
12376 case OT_cinfix3:
12377 case OT_cinfix3_deprecated:
12378 case OT_odd_infix_unc:
12379 if (!unified_syntax)
12380 return 0;
12381 /* else fall through */
12382
12383 case OT_csuffix:
12384 case OT_csuf_or_in3:
12385 inst.cond = cond->value;
12386 return opcode;
12387
12388 case OT_unconditional:
12389 case OT_unconditionalF:
12390 if (thumb_mode)
12391 {
12392 inst.cond = cond->value;
12393 }
12394 else
12395 {
12396 /* delayed diagnostic */
12397 inst.error = BAD_COND;
12398 inst.cond = COND_ALWAYS;
12399 }
12400 return opcode;
12401
12402 default:
12403 return 0;
12404 }
12405 }
12406
12407 /* Cannot have a usual-position infix on a mnemonic of less than
12408 six characters (five would be a suffix). */
12409 if (end - base < 6)
12410 return 0;
12411
12412 /* Look for infixed mnemonic in the usual position. */
12413 affix = base + 3;
12414 cond = hash_find_n (arm_cond_hsh, affix, 2);
12415 if (!cond)
12416 return 0;
12417
12418 memcpy (save, affix, 2);
12419 memmove (affix, affix + 2, (end - affix) - 2);
12420 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12421 memmove (affix + 2, affix, (end - affix) - 2);
12422 memcpy (affix, save, 2);
12423
12424 if (opcode
12425 && (opcode->tag == OT_cinfix3
12426 || opcode->tag == OT_cinfix3_deprecated
12427 || opcode->tag == OT_csuf_or_in3
12428 || opcode->tag == OT_cinfix3_legacy))
12429 {
12430 /* step CM */
12431 if (unified_syntax
12432 && (opcode->tag == OT_cinfix3
12433 || opcode->tag == OT_cinfix3_deprecated))
12434 as_warn (_("conditional infixes are deprecated in unified syntax"));
12435
12436 inst.cond = cond->value;
12437 return opcode;
12438 }
12439
12440 return 0;
12441 }
12442
12443 void
12444 md_assemble (char *str)
12445 {
12446 char *p = str;
12447 const struct asm_opcode * opcode;
12448
12449 /* Align the previous label if needed. */
12450 if (last_label_seen != NULL)
12451 {
12452 symbol_set_frag (last_label_seen, frag_now);
12453 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12454 S_SET_SEGMENT (last_label_seen, now_seg);
12455 }
12456
12457 memset (&inst, '\0', sizeof (inst));
12458 inst.reloc.type = BFD_RELOC_UNUSED;
12459
12460 opcode = opcode_lookup (&p);
12461 if (!opcode)
12462 {
12463 /* It wasn't an instruction, but it might be a register alias of
12464 the form alias .req reg, or a Neon .dn/.qn directive. */
12465 if (!create_register_alias (str, p)
12466 && !create_neon_reg_alias (str, p))
12467 as_bad (_("bad instruction `%s'"), str);
12468
12469 return;
12470 }
12471
12472 if (opcode->tag == OT_cinfix3_deprecated)
12473 as_warn (_("s suffix on comparison instruction is deprecated"));
12474
12475 if (thumb_mode)
12476 {
12477 arm_feature_set variant;
12478
12479 variant = cpu_variant;
12480 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12481 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12482 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12483 /* Check that this instruction is supported for this CPU. */
12484 if (!opcode->tvariant
12485 || (thumb_mode == 1
12486 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12487 {
12488 as_bad (_("selected processor does not support `%s'"), str);
12489 return;
12490 }
12491 if (inst.cond != COND_ALWAYS && !unified_syntax
12492 && opcode->tencode != do_t_branch)
12493 {
12494 as_bad (_("Thumb does not support conditional execution"));
12495 return;
12496 }
12497
12498 /* Check conditional suffixes. */
12499 if (current_it_mask)
12500 {
12501 int cond;
12502 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12503 current_it_mask <<= 1;
12504 current_it_mask &= 0x1f;
12505 /* The BKPT instruction is unconditional even in an IT block. */
12506 if (!inst.error
12507 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12508 {
12509 as_bad (_("incorrect condition in IT block"));
12510 return;
12511 }
12512 }
12513 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12514 {
12515 as_bad (_("Thumb conditional instruction not in IT block"));
12516 return;
12517 }
12518
12519 mapping_state (MAP_THUMB);
12520 inst.instruction = opcode->tvalue;
12521
12522 if (!parse_operands (p, opcode->operands))
12523 opcode->tencode ();
12524
12525 /* Clear current_it_mask at the end of an IT block. */
12526 if (current_it_mask == 0x10)
12527 current_it_mask = 0;
12528
12529 if (!(inst.error || inst.relax))
12530 {
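/* A 16-bit Thumb opcode is always below 0xe800; anything above 0xffff is a
   complete 32-bit Thumb-2 encoding. A value in between would be only the
   first halfword of a 32-bit encoding, which the Thumb encoders should
   never leave behind. */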
12531 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12532 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12533 if (inst.size_req && inst.size_req != inst.size)
12534 {
12535 as_bad (_("cannot honor width suffix -- `%s'"), str);
12536 return;
12537 }
12538 }
12539 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12540 *opcode->tvariant);
12541 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12542 set those bits when Thumb-2 32-bit instructions are seen, i.e.
12543 anything other than bl/blx.
12544 This is overly pessimistic for relaxable instructions. */
12545 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12546 || inst.relax)
12547 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12548 arm_ext_v6t2);
12549 }
12550 else
12551 {
12552 /* Check that this instruction is supported for this CPU. */
12553 if (!opcode->avariant ||
12554 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12555 {
12556 as_bad (_("selected processor does not support `%s'"), str);
12557 return;
12558 }
12559 if (inst.size_req)
12560 {
12561 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12562 return;
12563 }
12564
12565 mapping_state (MAP_ARM);
12566 inst.instruction = opcode->avalue;
12567 if (opcode->tag == OT_unconditionalF)
12568 inst.instruction |= 0xF << 28;
12569 else
12570 inst.instruction |= inst.cond << 28;
12571 inst.size = INSN_SIZE;
12572 if (!parse_operands (p, opcode->operands))
12573 opcode->aencode ();
12574 /* Arm mode bx is marked as both v4T and v5 because it's still required
12575 on a hypothetical non-thumb v5 core. */
12576 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12577 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12578 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12579 else
12580 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12581 *opcode->avariant);
12582 }
12583 output_inst (str);
12584 }
12585
12586 /* Various frobbings of labels and their addresses. */
12587
12588 void
12589 arm_start_line_hook (void)
12590 {
12591 last_label_seen = NULL;
12592 }
12593
12594 void
12595 arm_frob_label (symbolS * sym)
12596 {
12597 last_label_seen = sym;
12598
12599 ARM_SET_THUMB (sym, thumb_mode);
12600
12601 #if defined OBJ_COFF || defined OBJ_ELF
12602 ARM_SET_INTERWORK (sym, support_interwork);
12603 #endif
12604
12605 /* Note - do not allow local symbols (.Lxxx) to be labeled
12606 as Thumb functions. This is because these labels, whilst
12607 they exist inside Thumb code, are not the entry points for
12608 possible ARM->Thumb calls. Also, these labels can be used
12609 as part of a computed goto or switch statement. For example, gcc
12610 can generate code that looks like this:
12611
12612 ldr r2, [pc, .Laaa]
12613 lsl r3, r3, #2
12614 ldr r2, [r3, r2]
12615 mov pc, r2
12616
12617 .Lbbb: .word .Lxxx
12618 .Lccc: .word .Lyyy
12619 ..etc...
12620 .Laaa: .word .Lbbb
12621
12622 The first instruction loads the address of the jump table.
12623 The second instruction converts a table index into a byte offset.
12624 The third instruction gets the jump address out of the table.
12625 The fourth instruction performs the jump.
12626
12627 If the address stored at .Laaa is that of a symbol which has the
12628 Thumb_Func bit set, then the linker will arrange for this address
12629 to have the bottom bit set, which in turn would mean that the
12630 address computation performed by the third instruction would end
12631 up with the bottom bit set. Since the ARM is capable of unaligned
12632 word loads, the instruction would then load the incorrect address
12633 out of the jump table, and chaos would ensue. */
12634 if (label_is_thumb_function_name
12635 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12636 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12637 {
12638 /* When the address of a Thumb function is taken the bottom
12639 bit of that address should be set. This will allow
12640 interworking between Arm and Thumb functions to work
12641 correctly. */
12642
12643 THUMB_SET_FUNC (sym, 1);
12644
12645 label_is_thumb_function_name = FALSE;
12646 }
12647
12648 #ifdef OBJ_ELF
12649 dwarf2_emit_label (sym);
12650 #endif
12651 }
12652
12653 int
12654 arm_data_in_code (void)
12655 {
12656 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12657 {
12658 *input_line_pointer = '/';
12659 input_line_pointer += 5;
12660 *input_line_pointer = 0;
12661 return 1;
12662 }
12663
12664 return 0;
12665 }
12666
12667 char *
12668 arm_canonicalize_symbol_name (char * name)
12669 {
12670 int len;
12671
12672 if (thumb_mode && (len = strlen (name)) > 5
12673 && streq (name + len - 5, "/data"))
12674 *(name + len - 5) = 0;
12675
12676 return name;
12677 }
12678 \f
12679 /* Table of all register names defined by default. The user can
12680 define additional names with .req. Note that all register names
12681 should appear in both upper and lowercase variants. Some registers
12682 also have mixed-case names. */
12683
12684 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12685 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12686 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12687 #define REGSET(p,t) \
12688 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12689 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12690 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12691 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12692 #define REGSETH(p,t) \
12693 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12694 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12695 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12696 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12697 #define REGSET2(p,t) \
12698 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12699 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12700 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12701 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12702
12703 static const struct reg_entry reg_names[] =
12704 {
12705 /* ARM integer registers. */
12706 REGSET(r, RN), REGSET(R, RN),
12707
12708 /* ATPCS synonyms. */
12709 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12710 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12711 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12712
12713 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12714 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12715 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12716
12717 /* Well-known aliases. */
12718 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12719 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12720
12721 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12722 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12723
12724 /* Coprocessor numbers. */
12725 REGSET(p, CP), REGSET(P, CP),
12726
12727 /* Coprocessor register numbers. The "cr" variants are for backward
12728 compatibility. */
12729 REGSET(c, CN), REGSET(C, CN),
12730 REGSET(cr, CN), REGSET(CR, CN),
12731
12732 /* FPA registers. */
12733 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12734 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12735
12736 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12737 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12738
12739 /* VFP SP registers. */
12740 REGSET(s,VFS), REGSET(S,VFS),
12741 REGSETH(s,VFS), REGSETH(S,VFS),
12742
12743 /* VFP DP Registers. */
12744 REGSET(d,VFD), REGSET(D,VFD),
12745 /* Extra Neon DP registers. */
12746 REGSETH(d,VFD), REGSETH(D,VFD),
12747
12748 /* Neon QP registers. */
12749 REGSET2(q,NQ), REGSET2(Q,NQ),
12750
12751 /* VFP control registers. */
12752 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12753 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12754
12755 /* Maverick DSP coprocessor registers. */
12756 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12757 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12758
12759 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12760 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12761 REGDEF(dspsc,0,DSPSC),
12762
12763 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12764 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12765 REGDEF(DSPSC,0,DSPSC),
12766
12767 /* iWMMXt data registers - p0, c0-15. */
12768 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12769
12770 /* iWMMXt control registers - p1, c0-3. */
12771 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12772 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12773 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12774 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12775
12776 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12777 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12778 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12779 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12780 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12781
12782 /* XScale accumulator registers. */
12783 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12784 };
12785 #undef REGDEF
12786 #undef REGNUM
12787 #undef REGSET
12788
12789 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12790 within psr_required_here. */
12791 static const struct asm_psr psrs[] =
12792 {
12793 /* Backward compatibility notation. Note that "all" is no longer
12794 truly all possible PSR bits. */
12795 {"all", PSR_c | PSR_f},
12796 {"flg", PSR_f},
12797 {"ctl", PSR_c},
12798
12799 /* Individual flags. */
12800 {"f", PSR_f},
12801 {"c", PSR_c},
12802 {"x", PSR_x},
12803 {"s", PSR_s},
12804 /* Combinations of flags. */
12805 {"fs", PSR_f | PSR_s},
12806 {"fx", PSR_f | PSR_x},
12807 {"fc", PSR_f | PSR_c},
12808 {"sf", PSR_s | PSR_f},
12809 {"sx", PSR_s | PSR_x},
12810 {"sc", PSR_s | PSR_c},
12811 {"xf", PSR_x | PSR_f},
12812 {"xs", PSR_x | PSR_s},
12813 {"xc", PSR_x | PSR_c},
12814 {"cf", PSR_c | PSR_f},
12815 {"cs", PSR_c | PSR_s},
12816 {"cx", PSR_c | PSR_x},
12817 {"fsx", PSR_f | PSR_s | PSR_x},
12818 {"fsc", PSR_f | PSR_s | PSR_c},
12819 {"fxs", PSR_f | PSR_x | PSR_s},
12820 {"fxc", PSR_f | PSR_x | PSR_c},
12821 {"fcs", PSR_f | PSR_c | PSR_s},
12822 {"fcx", PSR_f | PSR_c | PSR_x},
12823 {"sfx", PSR_s | PSR_f | PSR_x},
12824 {"sfc", PSR_s | PSR_f | PSR_c},
12825 {"sxf", PSR_s | PSR_x | PSR_f},
12826 {"sxc", PSR_s | PSR_x | PSR_c},
12827 {"scf", PSR_s | PSR_c | PSR_f},
12828 {"scx", PSR_s | PSR_c | PSR_x},
12829 {"xfs", PSR_x | PSR_f | PSR_s},
12830 {"xfc", PSR_x | PSR_f | PSR_c},
12831 {"xsf", PSR_x | PSR_s | PSR_f},
12832 {"xsc", PSR_x | PSR_s | PSR_c},
12833 {"xcf", PSR_x | PSR_c | PSR_f},
12834 {"xcs", PSR_x | PSR_c | PSR_s},
12835 {"cfs", PSR_c | PSR_f | PSR_s},
12836 {"cfx", PSR_c | PSR_f | PSR_x},
12837 {"csf", PSR_c | PSR_s | PSR_f},
12838 {"csx", PSR_c | PSR_s | PSR_x},
12839 {"cxf", PSR_c | PSR_x | PSR_f},
12840 {"cxs", PSR_c | PSR_x | PSR_s},
12841 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12842 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12843 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12844 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12845 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12846 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12847 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12848 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12849 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12850 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12851 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12852 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12853 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12854 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12855 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12856 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12857 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12858 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12859 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12860 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12861 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12862 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12863 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12864 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12865 };
12866
12867 /* Table of V7M psr names. */
12868 static const struct asm_psr v7m_psrs[] =
12869 {
12870 {"apsr", 0 },
12871 {"iapsr", 1 },
12872 {"eapsr", 2 },
12873 {"psr", 3 },
12874 {"ipsr", 5 },
12875 {"epsr", 6 },
12876 {"iepsr", 7 },
12877 {"msp", 8 },
12878 {"psp", 9 },
12879 {"primask", 16},
12880 {"basepri", 17},
12881 {"basepri_max", 18},
12882 {"faultmask", 19},
12883 {"control", 20}
12884 };
12885
12886 /* Table of all shift-in-operand names. */
12887 static const struct asm_shift_name shift_names [] =
12888 {
12889 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12890 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12891 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12892 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12893 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12894 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12895 };
12896
12897 /* Table of all explicit relocation names. */
12898 #ifdef OBJ_ELF
12899 static struct reloc_entry reloc_names[] =
12900 {
12901 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12902 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12903 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12904 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12905 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12906 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12907 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12908 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12909 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12910 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12911 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12912 };
12913 #endif
12914
12915 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12916 static const struct asm_cond conds[] =
12917 {
12918 {"eq", 0x0},
12919 {"ne", 0x1},
12920 {"cs", 0x2}, {"hs", 0x2},
12921 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12922 {"mi", 0x4},
12923 {"pl", 0x5},
12924 {"vs", 0x6},
12925 {"vc", 0x7},
12926 {"hi", 0x8},
12927 {"ls", 0x9},
12928 {"ge", 0xa},
12929 {"lt", 0xb},
12930 {"gt", 0xc},
12931 {"le", 0xd},
12932 {"al", 0xe}
12933 };
12934
12935 static struct asm_barrier_opt barrier_opt_names[] =
12936 {
12937 { "sy", 0xf },
12938 { "un", 0x7 },
12939 { "st", 0xe },
12940 { "unst", 0x6 }
12941 };
12942
12943 /* Table of ARM-format instructions. */
12944
12945 /* Macros for gluing together operand strings. N.B. In all cases
12946 other than OPS0, the trailing OP_stop comes from default
12947 zero-initialization of the unspecified elements of the array. */
12948 #define OPS0() { OP_stop, }
12949 #define OPS1(a) { OP_##a, }
12950 #define OPS2(a,b) { OP_##a,OP_##b, }
12951 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12952 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12953 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12954 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
12955
12956 /* These macros abstract out the exact format of the mnemonic table and
12957 save some repeated characters. */
12958
12959 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12960 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12961 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12962 THUMB_VARIANT, do_##ae, do_##te }
12963
12964 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12965 a T_MNEM_xyz enumerator. */
12966 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12967 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12968 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12969 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
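/* As an informal illustration, the entry
   TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz)
   further down expands to roughly
   { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix, 0x16f0f10, 0xfab0f080,
   ARM_VARIANT, THUMB_VARIANT, do_rd_rm, do_t_clz },
   with ARM_VARIANT and THUMB_VARIANT taking whatever values are #defined at
   that point in the table. */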
12970
12971 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12972 infix after the third character. */
12973 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12974 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12975 THUMB_VARIANT, do_##ae, do_##te }
12976 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12977 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12978 THUMB_VARIANT, do_##ae, do_##te }
12979 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12980 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12981 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12982 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12983 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12984 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12985 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12986 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12987
12988 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12989 appear in the condition table. */
12990 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12991 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12992 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12993
12994 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12995 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12996 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12997 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12998 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12999 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
13000 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
13001 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
13002 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
13003 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
13004 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
13005 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
13006 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
13007 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
13008 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
13009 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
13010 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
13011 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
13012 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
13013 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
13014
13015 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
13016 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
13017 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
13018 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
13019
13020 /* Mnemonic that cannot be conditionalized. The ARM condition-code
13021 field is still 0xE. Many of the Thumb variants can be executed
13022 conditionally, so this is checked separately. */
13023 #define TUE(mnem, op, top, nops, ops, ae, te) \
13024 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
13025 THUMB_VARIANT, do_##ae, do_##te }
13026
13027 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
13028 condition code field. */
13029 #define TUF(mnem, op, top, nops, ops, ae, te) \
13030 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
13031 THUMB_VARIANT, do_##ae, do_##te }
13032
13033 /* ARM-only variants of all the above. */
13034 #define CE(mnem, op, nops, ops, ae) \
13035 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13036
13037 #define C3(mnem, op, nops, ops, ae) \
13038 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13039
13040 /* Legacy mnemonics that always have conditional infix after the third
13041 character. */
13042 #define CL(mnem, op, nops, ops, ae) \
13043 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13044 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13045
13046 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
13047 #define cCE(mnem, op, nops, ops, ae) \
13048 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13049
13050 /* Legacy coprocessor instructions where conditional infix and conditional
13051 suffix are ambiguous. For consistency this includes all FPA instructions,
13052 not just the potentially ambiguous ones. */
13053 #define cCL(mnem, op, nops, ops, ae) \
13054 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13055 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13056
13057 /* Coprocessor, takes either a suffix or a position-3 infix
13058 (for an FPA corner case). */
13059 #define C3E(mnem, op, nops, ops, ae) \
13060 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
13061 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13062
13063 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
13064 { #m1 #m2 #m3, OPS##nops ops, \
13065 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
13066 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13067
13068 #define CM(m1, m2, op, nops, ops, ae) \
13069 xCM_(m1, , m2, op, nops, ops, ae), \
13070 xCM_(m1, eq, m2, op, nops, ops, ae), \
13071 xCM_(m1, ne, m2, op, nops, ops, ae), \
13072 xCM_(m1, cs, m2, op, nops, ops, ae), \
13073 xCM_(m1, hs, m2, op, nops, ops, ae), \
13074 xCM_(m1, cc, m2, op, nops, ops, ae), \
13075 xCM_(m1, ul, m2, op, nops, ops, ae), \
13076 xCM_(m1, lo, m2, op, nops, ops, ae), \
13077 xCM_(m1, mi, m2, op, nops, ops, ae), \
13078 xCM_(m1, pl, m2, op, nops, ops, ae), \
13079 xCM_(m1, vs, m2, op, nops, ops, ae), \
13080 xCM_(m1, vc, m2, op, nops, ops, ae), \
13081 xCM_(m1, hi, m2, op, nops, ops, ae), \
13082 xCM_(m1, ls, m2, op, nops, ops, ae), \
13083 xCM_(m1, ge, m2, op, nops, ops, ae), \
13084 xCM_(m1, lt, m2, op, nops, ops, ae), \
13085 xCM_(m1, gt, m2, op, nops, ops, ae), \
13086 xCM_(m1, le, m2, op, nops, ops, ae), \
13087 xCM_(m1, al, m2, op, nops, ops, ae)
13088
13089 #define UE(mnem, op, nops, ops, ae) \
13090 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13091
13092 #define UF(mnem, op, nops, ops, ae) \
13093 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13094
13095 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
13096 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
13097 use the same encoding function for each. */
13098 #define NUF(mnem, op, nops, ops, enc) \
13099 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
13100 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13101
13102 /* Neon data processing, version which indirects through neon_enc_tab for
13103 the various overloaded versions of opcodes. */
13104 #define nUF(mnem, op, nops, ops, enc) \
13105 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13106 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13107
13108 /* Neon insn with conditional suffix for the ARM version, non-overloaded
13109 version. */
13110 #define NCE(mnem, op, nops, ops, enc) \
13111 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13112 THUMB_VARIANT, do_##enc, do_##enc }
13113
13114 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13115 #define nCE(mnem, op, nops, ops, enc) \
13116 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13117 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13118
13119 #define do_0 0
13120
13121 /* Thumb-only, unconditional. */
13122 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13123
13124 static const struct asm_opcode insns[] =
13125 {
13126 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13127 #define THUMB_VARIANT &arm_ext_v4t
13128 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13129 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13130 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13131 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13132 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13133 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13134 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13135 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13136 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13137 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13138 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13139 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13140 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13141 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13142 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13143 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13144
13145 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13146 for setting PSR flag bits. They are obsolete in V6 and do not
13147 have Thumb equivalents. */
13148 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13149 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13150 CL(tstp, 110f000, 2, (RR, SH), cmp),
13151 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13152 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13153 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13154 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13155 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13156 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13157
13158 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13159 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13160 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13161 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13162
13163 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13164 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13165 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13166 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13167
13168 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13169 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13170 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13171 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13172 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13173 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13174
13175 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13176 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13177 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13178 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13179
13180 /* Pseudo ops. */
13181 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13182 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13183 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13184
13185 /* Thumb-compatibility pseudo ops. */
13186 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13187 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13188 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13189 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13190 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13191 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13192 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13193 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13194 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13195 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13196 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13197 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13198
13199 #undef THUMB_VARIANT
13200 #define THUMB_VARIANT &arm_ext_v6
13201 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13202
13203 /* V1 instructions with no Thumb analogue prior to V6T2. */
13204 #undef THUMB_VARIANT
13205 #define THUMB_VARIANT &arm_ext_v6t2
13206 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13207 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13208 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13209 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13210 CL(teqp, 130f000, 2, (RR, SH), cmp),
13211
13212 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13213 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13214 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13215 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13216
13217 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13218 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13219
13220 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13221 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13222
13223 /* V1 instructions with no Thumb analogue at all. */
13224 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13225 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13226
13227 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13228 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13229 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13230 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13231 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13232 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13233 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13234 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13235
13236 #undef ARM_VARIANT
13237 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13238 #undef THUMB_VARIANT
13239 #define THUMB_VARIANT &arm_ext_v4t
13240 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13241 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13242
13243 #undef THUMB_VARIANT
13244 #define THUMB_VARIANT &arm_ext_v6t2
13245 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13246 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13247
13248 /* Generic coprocessor instructions. */
13249 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13250 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13251 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13252 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13253 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13254 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13255 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13256
13257 #undef ARM_VARIANT
13258 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13259 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13260 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13261
13262 #undef ARM_VARIANT
13263 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13264 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13265 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13266
13267 #undef ARM_VARIANT
13268 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13269 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13270 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13271 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13272 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13273 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13274 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13275 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13276 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13277
13278 #undef ARM_VARIANT
13279 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13280 #undef THUMB_VARIANT
13281 #define THUMB_VARIANT &arm_ext_v4t
13282 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13283 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13284 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13285 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13286 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13287 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13288
13289 #undef ARM_VARIANT
13290 #define ARM_VARIANT &arm_ext_v4t_5
13291 /* ARM Architecture 4T. */
13292 /* Note: bx (and blx) are required on V5, even if the processor does
13293 not support Thumb. */
13294 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13295
13296 #undef ARM_VARIANT
13297 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13298 #undef THUMB_VARIANT
13299 #define THUMB_VARIANT &arm_ext_v5t
13300 /* Note: blx has 2 variants; the .value coded here is for
13301 BLX(2). Only this variant has conditional execution. */
13302 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13303 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13304
13305 #undef THUMB_VARIANT
13306 #define THUMB_VARIANT &arm_ext_v6t2
13307 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13308 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13309 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13310 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13311 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13312 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13313 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13314 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13315
13316 #undef ARM_VARIANT
13317 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13318 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13319 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13320 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13321 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13322
13323 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13324 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13325
13326 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13327 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13328 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13329 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13330
13331 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13332 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13333 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13334 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13335
13336 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13337 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13338
13339 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13340 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13341 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13342 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13343
13344 #undef ARM_VARIANT
13345 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13346 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13347 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13348 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13349
13350 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13351 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13352
13353 #undef ARM_VARIANT
13354 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13355 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13356
13357 #undef ARM_VARIANT
13358 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13359 #undef THUMB_VARIANT
13360 #define THUMB_VARIANT &arm_ext_v6
13361 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13362 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13363 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13364 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13365 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13366 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13367 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13368 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13369 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13370 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13371
13372 #undef THUMB_VARIANT
13373 #define THUMB_VARIANT &arm_ext_v6t2
13374 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13375 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13376 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13377
13378 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13379 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13380
13381 /* ARM V6 instructions not included in V7M (e.g. integer SIMD).  */
13382 #undef THUMB_VARIANT
13383 #define THUMB_VARIANT &arm_ext_v6_notm
13384 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13385 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13386 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13387 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13388 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13389 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13390 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13391 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13392 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13393 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13394 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13395 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13396 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13397 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13398 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13399 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13400 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13401 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13402 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13403 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13404 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13405 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13406 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13407 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13408 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13409 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13410 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13411 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13412 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13413 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13414 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13415 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13416 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13417 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13418 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13419 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13420 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13421 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13422 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13423 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13424 UF(rfeib, 9900a00, 1, (RRw), rfe),
13425 UF(rfeda, 8100a00, 1, (RRw), rfe),
13426 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13427 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13428 UF(rfefa, 9900a00, 1, (RRw), rfe),
13429 UF(rfeea, 8100a00, 1, (RRw), rfe),
13430 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13431 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13432 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13433 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13434 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13435 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13436 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13437 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13438 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13439 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13440 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13441 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13442 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13443 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13444 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13445 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13446 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13447 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13448 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13449 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13450 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13451 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13452 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13453 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13454 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13455 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13456 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13457 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13458 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13459 UF(srsib, 9cd0500, 1, (I31w), srs),
13460 UF(srsda, 84d0500, 1, (I31w), srs),
13461 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13462 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13463 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13464 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13465 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13466 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13467 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13468
13469 #undef ARM_VARIANT
13470 #define ARM_VARIANT &arm_ext_v6k
13471 #undef THUMB_VARIANT
13472 #define THUMB_VARIANT &arm_ext_v6k
13473 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13474 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13475 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13476 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13477
13478 #undef THUMB_VARIANT
13479 #define THUMB_VARIANT &arm_ext_v6_notm
13480 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13481 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13482
13483 #undef THUMB_VARIANT
13484 #define THUMB_VARIANT &arm_ext_v6t2
13485 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13486 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13487 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13488 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13489 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13490
13491 #undef ARM_VARIANT
13492 #define ARM_VARIANT &arm_ext_v6z
13493 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13494
13495 #undef ARM_VARIANT
13496 #define ARM_VARIANT &arm_ext_v6t2
13497 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13498 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13499 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13500 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13501
13502 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13503 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
13504 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
13505 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13506
13507 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13508 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13509 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13510 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13511
13512 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13513 UT(cbz, b100, 2, (RR, EXP), t_czb),
13514 /* ARM does not really have an IT instruction. */
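 /* In Thumb-2, IT makes up to four following instructions conditional; the
    "t"/"e" letters select the base condition or its inverse, so e.g.
    "ittee ne" covers NE, NE, EQ, EQ.  */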
13515 TUE(it, 0, bf08, 1, (COND), it, t_it),
13516 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13517 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13518 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13519 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13520 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13521 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13522 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13523 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13524 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13525 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13526 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13527 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13528 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13529 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
13530
13531 /* Thumb-2-only instructions.  */
13532 #undef ARM_VARIANT
13533 #define ARM_VARIANT NULL
13534
13535 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13536 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13537 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13538 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13539
13540 /* Thumb-2 hardware division instructions (R and M profiles only). */
13541 #undef THUMB_VARIANT
13542 #define THUMB_VARIANT &arm_ext_div
13543 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13544 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13545
13546 /* ARM V7 instructions. */
13547 #undef ARM_VARIANT
13548 #define ARM_VARIANT &arm_ext_v7
13549 #undef THUMB_VARIANT
13550 #define THUMB_VARIANT &arm_ext_v7
13551 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13552 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13553 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13554 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13555 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
13556
13557 #undef ARM_VARIANT
13558 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13559 cCE(wfs, e200110, 1, (RR), rd),
13560 cCE(rfs, e300110, 1, (RR), rd),
13561 cCE(wfc, e400110, 1, (RR), rd),
13562 cCE(rfc, e500110, 1, (RR), rd),
13563
13564 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13565 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13566 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13567 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13568
13569 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13570 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13571 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13572 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13573
13574 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13575 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13576 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13577 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13578 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13579 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13580 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13581 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13582 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13583 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13584 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13585 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13586
13587 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13588 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13589 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13590 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13591 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13592 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13593 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13594 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13595 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13596 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13597 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13598 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13599
13600 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13601 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13602 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13603 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13604 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13605 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13606 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13607 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13608 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13609 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13610 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13611 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13612
13613 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13614 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13615 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13616 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13617 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13618 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13619 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13620 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13621 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13622 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13623 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13624 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13625
13626 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13627 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13628 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13629 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13630 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13631 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13632 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13633 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13634 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13635 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13636 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13637 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13638
13639 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13640 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13641 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13642 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13643 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13644 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13645 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13646 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13647 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13648 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13649 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13650 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13651
13652 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13653 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13654 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13655 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13656 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13657 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13658 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13659 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13660 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13661 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13662 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13663 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13664
13665 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13666 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13667 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13668 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13669 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13670 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13671 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13672 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13673 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13674 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13675 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13676 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
13677
13678 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13679 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13680 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13681 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13682 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13683 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13684 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13685 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13686 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13687 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13688 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13689 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13690
13691 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13692 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13693 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13694 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13695 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13696 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13697 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13698 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13699 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13700 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13701 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13702 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13703
13704 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13705 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13706 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13707 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13708 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13709 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13710 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13711 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13712 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13713 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13714 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13715 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13716
13717 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13718 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13719 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13720 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13721 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13722 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13723 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13724 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13725 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13726 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13727 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13728 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13729
13730 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13731 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13732 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13733 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13734 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13735 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13736 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13737 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13738 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13739 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13740 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13741 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13742
13743 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13744 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13745 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13746 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13747 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13748 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13749 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13750 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13751 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13752 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13753 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13754 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13755
13756 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13757 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13758 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13759 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13760 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13761 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13762 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13763 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13764 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13765 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13766 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13767 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13768
13769 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13770 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13771 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13772 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13773 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13774 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13775 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13776 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13777 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13778 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13779 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13780 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13781
13782 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13784 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13788 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13789 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13794
13795 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13797 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13801 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13802 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13807
13808 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13810 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13814 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13815 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13820
13821 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13823 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13827 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13828 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13829 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13830 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13831 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13832 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13833
13834 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13835 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13836 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13837 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13838 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13839 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13840 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13841 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13842 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13843 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13844 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13845 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13846
13847 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13848 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13849 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13850 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13851 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13852 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13853 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13854 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13855 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13856 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13857 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13858 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13859
13860 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13861 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13862 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13863 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13864 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13865 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13866 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13867 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13868 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13869 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13870 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13871 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13872
13873 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13874 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13875 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13876 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13877 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13878 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13879 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13880 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13881 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13882 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13883 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13884 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13885
13886 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13887 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13888 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13889 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13890 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13891 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13892 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13893 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13894 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13895 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13896 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13897 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13898
13899 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13900 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13901 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13902 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13903 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13904 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13905 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13906 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13907 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13908 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13909 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13910 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13911
13912 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13913 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13914 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13915 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13916 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13917 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13918 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13919 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13920 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13921 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13922 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13923 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13924
13925 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13926 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13927 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13928 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13929 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13930 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13931 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13932 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13933 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13934 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13935 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13936 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13937
13938 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13939 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13940 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13941 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13942 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13943 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13944 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13945 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13946 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13947 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13948 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13949 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13950
13951 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13952 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13953 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13954 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13955
13956 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13957 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13958 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13959 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13960 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13961 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13962 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13963 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13964 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13965 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13966 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13967 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13968
13969 /* The implementation of the FIX instruction is broken on some
13970 assemblers, in that it accepts a precision specifier as well as a
13971 rounding specifier, despite the fact that this is meaningless.
13972 To be more compatible, we accept it as well, though of course it
13973 does not set any bits. */
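 /* For example, "fixsp r0, f1" assembles to the same opcode as "fixp r0, f1";
    the precision letter is parsed and then discarded.  */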
13974 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13975 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13976 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13977 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13978 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13979 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13980 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13981 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13982 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13983 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13984 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13985 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13986 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13987
13988 /* Instructions that were new with the real FPA; call them V2.  */
13989 #undef ARM_VARIANT
13990 #define ARM_VARIANT &fpu_fpa_ext_v2
13991 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13992 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13993 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13994 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13995 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13996 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13997
13998 #undef ARM_VARIANT
13999 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
14000 /* Moves and type conversions. */
14001 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
14002 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
14003 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
14004 cCE(fmstat, ef1fa10, 0, (), noargs),
14005 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
14006 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
14007 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
14008 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14009 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
14010 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14011 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
14012 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
14013
14014 /* Memory operations. */
14015 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
14016 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
14017 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14018 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14019 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14020 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14021 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14022 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14023 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14024 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14025 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14026 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14027 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14028 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14029 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14030 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14031 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14032 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14033
14034 /* Monadic operations. */
14035 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
14036 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
14037 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
14038
14039 /* Dyadic operations. */
14040 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14041 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14042 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14043 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14044 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14045 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14046 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14047 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14048 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14049
14050 /* Comparisons. */
14051 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
14052 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
14053 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
14054 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
14055
14056 #undef ARM_VARIANT
14057 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
14058 /* Moves and type conversions. */
14059 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14060 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14061 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14062 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
14063 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
14064 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
14065 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
14066 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14067 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
14068 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14069 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14070 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14071 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14072
14073 /* Memory operations. */
14074 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
14075 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
14076 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14077 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14078 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14079 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14080 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14081 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14082 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14083 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14084
14085 /* Monadic operations. */
14086 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14087 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14088 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14089
14090 /* Dyadic operations. */
14091 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14092 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14093 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14094 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14095 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14096 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14097 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14098 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14099 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14100
14101 /* Comparisons. */
14102 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14103 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
14104 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14105 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
14106
14107 #undef ARM_VARIANT
14108 #define ARM_VARIANT &fpu_vfp_ext_v2
14109 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
14110 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
14111 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
14112 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
14113
14114 #undef THUMB_VARIANT
14115 #define THUMB_VARIANT &fpu_neon_ext_v1
14116 #undef ARM_VARIANT
14117 #define ARM_VARIANT &fpu_neon_ext_v1
14118 /* Data processing with three registers of the same length. */
14119 /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
14120 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
14121 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14122 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14123 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14124 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14125 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14126 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14127 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14128 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
14129 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14130 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14131 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14132 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14133 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14134 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14135 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14136 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14137 /* If not immediate, fall back to neon_dyadic_i64_su.
14138 shl_imm should accept I8 I16 I32 I64,
14139 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
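 /* E.g. "vshl.i32 d0, d1, #3" uses the immediate form, whereas
    "vshl.s32 d0, d1, d2" falls back to the register (dyadic) form.  */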
14140 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14141 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14142 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14143 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14144 /* Logic ops, types optional & ignored. */
14145 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14146 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14147 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14148 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14149 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14150 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14151 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14152 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14153 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14154 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14155 /* Bitfield ops, untyped. */
14156 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14157 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14158 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14159 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14160 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14161 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14162 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14163 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14164 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14165 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14166 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14167 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14168 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14169 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14170 back to neon_dyadic_if_su. */
14171 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14172 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14173 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14174 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14175 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14176 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14177 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14178 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14179 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14180 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14181 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14182 /* As above, D registers only. */
14183 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14184 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14185 /* Int and float variants, signedness unimportant. */
14186 /* If not scalar, fall back to neon_dyadic_if_i. */
14187 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14188 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14189 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14190 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14191 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14192 /* Add/sub take types I8 I16 I32 I64 F32. */
14193 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14194 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14195 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14196 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14197 /* vtst takes sizes 8, 16, 32. */
14198 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14199 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14200 /* VMUL takes I8 I16 I32 F32 P8. */
14201 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14202 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14203 /* VQD{R}MULH takes S16 S32. */
14204 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14205 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14206 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14207 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14208 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14209 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14210 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14211 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14212 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14213 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14214 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14215 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14216 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14217 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14218 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14219 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14220
14221 /* Two address, int/float. Types S8 S16 S32 F32. */
14222 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14223 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14224 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14225 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14226
14227 /* Data processing with two registers and a shift amount. */
14228 /* Right shifts, and variants with rounding.
14229 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14230 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14231 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14232 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14233 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14234 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14235 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14236 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14237 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14238 /* Shift and insert. Sizes accepted 8 16 32 64. */
14239 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14240 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14241 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14242 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14243 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14244 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14245 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14246 /* Right shift immediate, saturating & narrowing, with rounding variants.
14247 Types accepted S16 S32 S64 U16 U32 U64. */
14248 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14249 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14250 /* As above, unsigned. Types accepted S16 S32 S64. */
14251 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14252 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14253 /* Right shift narrowing. Types accepted I16 I32 I64. */
14254 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14255 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14256 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14257 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14258 /* CVT with optional immediate for fixed-point variant. */
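 /* E.g. "vcvt.s32.f32 d0, d1" is the plain conversion, while
    "vcvt.s32.f32 d0, d1, #16" produces fixed point with 16 fraction bits.  */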
14259 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14260 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14261
14262 /* One register and an immediate value.  All encodings are special-cased!  */
14263 #undef THUMB_VARIANT
14264 #define THUMB_VARIANT &fpu_vfp_ext_v1
14265 #undef ARM_VARIANT
14266 #define ARM_VARIANT &fpu_vfp_ext_v1
14267 NCE(vmov, 0, 1, (VMOV), neon_mov),
14268
14269 #undef THUMB_VARIANT
14270 #define THUMB_VARIANT &fpu_neon_ext_v1
14271 #undef ARM_VARIANT
14272 #define ARM_VARIANT &fpu_neon_ext_v1
14273 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14274 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14275 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14276
14277 /* Data processing, three registers of different lengths. */
14278 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14279 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14280 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14281 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14282 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14283 /* If not scalar, fall back to neon_dyadic_long.
14284 Vector types as above, scalar types S16 S32 U16 U32. */
14285 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14286 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14287 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14288 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14289 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14290 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14291 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14292 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14293 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14294 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14295 /* Saturating doubling multiplies. Types S16 S32. */
14296 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14297 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14298 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14299 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14300 S16 S32 U16 U32. */
14301 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14302
14303 /* Extract. Size 8. */
14304 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14305 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14306
14307 /* Two registers, miscellaneous. */
14308 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14309 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14310 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14311 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14312 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14313 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14314 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14315 /* Vector replicate. Sizes 8 16 32. */
14316 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14317 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14318 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14319 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14320 /* VMOVN. Types I16 I32 I64. */
14321 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14322 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14323 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14324 /* VQMOVUN. Types S16 S32 S64. */
14325 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14326 /* VZIP / VUZP. Sizes 8 16 32. */
14327 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14328 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14329 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14330 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14331 /* VQABS / VQNEG. Types S8 S16 S32. */
14332 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14333 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14334 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14335 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14336 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14337 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14338 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14339 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14340 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14341 /* Reciprocal estimates. Types U32 F32. */
14342 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14343 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14344 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14345 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14346 /* VCLS. Types S8 S16 S32. */
14347 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14348 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14349 /* VCLZ. Types I8 I16 I32. */
14350 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14351 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14352 /* VCNT. Size 8. */
14353 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14354 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14355 /* Two address, untyped. */
14356 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14357 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14358 /* VTRN. Sizes 8 16 32. */
14359 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14360 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14361
14362 /* Table lookup. Size 8. */
14363 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14364 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14365
14366 #undef THUMB_VARIANT
14367 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
14368 #undef ARM_VARIANT
14369 #define ARM_VARIANT &fpu_vfp_ext_v1xd
14370
14371 /* Load/store instructions. Available in Neon or VFPv3. */
14372 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14373 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14374 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14375 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14376 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14377 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14378 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14379 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14380
14381 #undef THUMB_VARIANT
14382 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14383 #undef ARM_VARIANT
14384 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14385
14386 /* Neon element/structure load/store. */
14387 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14388 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14389 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14390 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14391 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14392 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14393 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14394 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14395
14396 #undef THUMB_VARIANT
14397 #define THUMB_VARIANT &fpu_vfp_ext_v3
14398 #undef ARM_VARIANT
14399 #define ARM_VARIANT &fpu_vfp_ext_v3
14400
14401 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14402 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14403 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14404 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14405 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14406 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14407 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14408 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14409 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14410 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14411 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14412 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14413 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14414 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14415 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14416 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14417 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14418 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14419
14420 #undef THUMB_VARIANT
14421 #undef ARM_VARIANT
14422 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14423 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14424 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14425 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14426 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14427 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14428 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14429 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14430 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14431
14432 #undef ARM_VARIANT
14433 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14434 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14435 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14436 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14437 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14438 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14439 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14440 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14441 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14442 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14443 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14444 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14445 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14446 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14447 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14448 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14449 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14450 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14451 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14452 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14453 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14454 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14455 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14456 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14457 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14458 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14459 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14460 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14461 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14462 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14463 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14464 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14465 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14466 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14467 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14468 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14469 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14470 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14471 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14472 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14473 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14474 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14475 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14476 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14477 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14478 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14479 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14480 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14481 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14482 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14483 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14484 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14485 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14486 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14487 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14488 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14489 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14490 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14491 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14492 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14493 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14494 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14495 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14496 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14497 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14498 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14499 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14500 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14501 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14502 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14503 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14504 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14505 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14506 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14507 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14508 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14509 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14510 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14511 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14512 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14513 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14514 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14515 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14516 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14517 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14518 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14519 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14520 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14521 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14522 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14523 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14524 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14525 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14526 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14527 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14528 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14529 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14530 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14531 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14532 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14533 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14534 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14535 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14536 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14537 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14538 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14539 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14540 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14541 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14542 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14543 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14544 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14545 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14546 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14547 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14548 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14549 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14550 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14551 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14552 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14553 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14554 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14555 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14556 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14557 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14558 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14559 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14560 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14561 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14562 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14563 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14564 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14565 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14566 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14567 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14568 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14569 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14570 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14571 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14572 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14573 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14574 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14575 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14576 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14577 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14578 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14579 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14580 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14581 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14582 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14583 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14584 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14585 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14586 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14587 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14588 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14589 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14590 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14591 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14592 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14593 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14594 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14595 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14596
14597 #undef ARM_VARIANT
14598 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14599 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14600 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14601 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14602 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14603 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14604 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14605 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14606 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14607 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14608 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14609 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14610 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14611 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14612 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14613 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14614 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14615 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14616 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14617 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14618 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14619 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14620 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14621 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14622 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14623 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14624 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14625 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14626 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14627 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14628 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14629 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14630 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14631 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14632 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14633 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14634 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14635 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14636 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14637 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14638 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14639 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14640 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14641 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14642 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14643 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14644 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14645 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14646 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14647 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14648 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14649 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14650 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14651 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14652 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14653 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14654 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14655 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14656 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14657 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14658 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14659 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14660 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14661 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14662 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14663 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14664 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14665 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14666 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14667 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14668 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14669 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14670 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14671 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14672 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14673 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14674 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14675 };
14676 #undef ARM_VARIANT
14677 #undef THUMB_VARIANT
14678 #undef TCE
14679 #undef TCM
14680 #undef TUE
14681 #undef TUF
14682 #undef TCC
14683 #undef cCE
14684 #undef cCL
14685 #undef C3E
14686 #undef CE
14687 #undef CM
14688 #undef UE
14689 #undef UF
14690 #undef UT
14691 #undef NUF
14692 #undef nUF
14693 #undef NCE
14694 #undef nCE
14695 #undef OPS0
14696 #undef OPS1
14697 #undef OPS2
14698 #undef OPS3
14699 #undef OPS4
14700 #undef OPS5
14701 #undef OPS6
14702 #undef do_0
14703 \f
14704 /* MD interface: bits in the object file. */
14705
14706 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14707 for use in the a.out file, and store them in the array pointed to by buf.
14708 This knows about the endian-ness of the target machine and does
14709 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
14710 2 (short) and 4 (long). Floating point numbers are put out as a series of
14711 LITTLENUMS (shorts, here at least). */
14712
14713 void
14714 md_number_to_chars (char * buf, valueT val, int n)
14715 {
14716 if (target_big_endian)
14717 number_to_chars_bigendian (buf, val, n);
14718 else
14719 number_to_chars_littleendian (buf, val, n);
14720 }
14721
14722 static valueT
14723 md_chars_to_number (char * buf, int n)
14724 {
14725 valueT result = 0;
14726 unsigned char * where = (unsigned char *) buf;
14727
14728 if (target_big_endian)
14729 {
14730 while (n--)
14731 {
14732 result <<= 8;
14733 result |= (*where++ & 255);
14734 }
14735 }
14736 else
14737 {
14738 while (n--)
14739 {
14740 result <<= 8;
14741 result |= (where[n] & 255);
14742 }
14743 }
14744
14745 return result;
14746 }
14747
14748 /* MD interface: Sections. */
14749
14750 /* Estimate the size of a frag before relaxing. Assume everything fits in
14751 2 bytes. */
14752
14753 int
14754 md_estimate_size_before_relax (fragS * fragp,
14755 segT segtype ATTRIBUTE_UNUSED)
14756 {
14757 fragp->fr_var = 2;
14758 return 2;
14759 }
14760
14761 /* Convert a machine dependent frag. */
14762
14763 void
14764 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14765 {
14766 unsigned long insn;
14767 unsigned long old_op;
14768 char *buf;
14769 expressionS exp;
14770 fixS *fixp;
14771 int reloc_type;
14772 int pc_rel;
14773 int opcode;
14774
14775 buf = fragp->fr_literal + fragp->fr_fix;
14776
14777 old_op = bfd_get_16(abfd, buf);
14778 if (fragp->fr_symbol) {
14779 exp.X_op = O_symbol;
14780 exp.X_add_symbol = fragp->fr_symbol;
14781 } else {
14782 exp.X_op = O_constant;
14783 }
14784 exp.X_add_number = fragp->fr_offset;
14785 opcode = fragp->fr_subtype;
14786 switch (opcode)
14787 {
14788 case T_MNEM_ldr_pc:
14789 case T_MNEM_ldr_pc2:
14790 case T_MNEM_ldr_sp:
14791 case T_MNEM_str_sp:
14792 case T_MNEM_ldr:
14793 case T_MNEM_ldrb:
14794 case T_MNEM_ldrh:
14795 case T_MNEM_str:
14796 case T_MNEM_strb:
14797 case T_MNEM_strh:
14798 if (fragp->fr_var == 4)
14799 {
14800 insn = THUMB_OP32(opcode);
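          /* Note (added): the 16-bit PC-relative (0x4xxx) and SP-relative
             (0x9xxx) forms keep Rd in bits 8-10; the other forms keep Rt in
             bits 0-2 and Rn in bits 3-5, which map to the 32-bit Rt
             (bits 12-15) and Rn (bits 16-19) fields below.  */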
14801 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14802 {
14803 insn |= (old_op & 0x700) << 4;
14804 }
14805 else
14806 {
14807 insn |= (old_op & 7) << 12;
14808 insn |= (old_op & 0x38) << 13;
14809 }
14810 insn |= 0x00000c00;
14811 put_thumb32_insn (buf, insn);
14812 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14813 }
14814 else
14815 {
14816 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14817 }
14818 pc_rel = (opcode == T_MNEM_ldr_pc2);
14819 break;
14820 case T_MNEM_adr:
14821 if (fragp->fr_var == 4)
14822 {
14823 insn = THUMB_OP32 (opcode);
14824 insn |= (old_op & 0xf0) << 4;
14825 put_thumb32_insn (buf, insn);
14826 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14827 }
14828 else
14829 {
14830 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14831 exp.X_add_number -= 4;
14832 }
14833 pc_rel = 1;
14834 break;
14835 case T_MNEM_mov:
14836 case T_MNEM_movs:
14837 case T_MNEM_cmp:
14838 case T_MNEM_cmn:
14839 if (fragp->fr_var == 4)
14840 {
14841 int r0off = (opcode == T_MNEM_mov
14842 || opcode == T_MNEM_movs) ? 0 : 8;
14843 insn = THUMB_OP32 (opcode);
14844 insn = (insn & 0xe1ffffff) | 0x10000000;
14845 insn |= (old_op & 0x700) << r0off;
14846 put_thumb32_insn (buf, insn);
14847 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14848 }
14849 else
14850 {
14851 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14852 }
14853 pc_rel = 0;
14854 break;
14855 case T_MNEM_b:
14856 if (fragp->fr_var == 4)
14857 {
14858 insn = THUMB_OP32(opcode);
14859 put_thumb32_insn (buf, insn);
14860 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14861 }
14862 else
14863 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14864 pc_rel = 1;
14865 break;
14866 case T_MNEM_bcond:
14867 if (fragp->fr_var == 4)
14868 {
14869 insn = THUMB_OP32(opcode);
14870 insn |= (old_op & 0xf00) << 14;
14871 put_thumb32_insn (buf, insn);
14872 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14873 }
14874 else
14875 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14876 pc_rel = 1;
14877 break;
14878 case T_MNEM_add_sp:
14879 case T_MNEM_add_pc:
14880 case T_MNEM_inc_sp:
14881 case T_MNEM_dec_sp:
14882 if (fragp->fr_var == 4)
14883 {
14884 /* ??? Choose between add and addw. */
14885 insn = THUMB_OP32 (opcode);
14886 insn |= (old_op & 0xf0) << 4;
14887 put_thumb32_insn (buf, insn);
14888 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14889 }
14890 else
14891 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14892 pc_rel = 0;
14893 break;
14894
14895 case T_MNEM_addi:
14896 case T_MNEM_addis:
14897 case T_MNEM_subi:
14898 case T_MNEM_subis:
14899 if (fragp->fr_var == 4)
14900 {
14901 insn = THUMB_OP32 (opcode);
14902 insn |= (old_op & 0xf0) << 4;
14903 insn |= (old_op & 0xf) << 16;
14904 put_thumb32_insn (buf, insn);
14905 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14906 }
14907 else
14908 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14909 pc_rel = 0;
14910 break;
14911 default:
14912 abort();
14913 }
14914 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14915 reloc_type);
14916 fixp->fx_file = fragp->fr_file;
14917 fixp->fx_line = fragp->fr_line;
14918 fragp->fr_fix += fragp->fr_var;
14919 }
14920
14921 /* Return the size of a relaxable immediate operand instruction.
14922 SHIFT and SIZE specify the form of the allowable immediate. */
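/* Returns 2 if the narrow encoding suffices, 4 if the 32-bit form is needed,
   or -4 to force the 32-bit form immediately and stop relaxing (see
   arm_relax_frag).  */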
14923 static int
14924 relax_immediate (fragS *fragp, int size, int shift)
14925 {
14926 offsetT offset;
14927 offsetT mask;
14928 offsetT low;
14929
14930 /* ??? Should be able to do better than this. */
14931 if (fragp->fr_symbol)
14932 return 4;
14933
14934 low = (1 << shift) - 1;
14935 mask = (1 << (shift + size)) - (1 << shift);
14936 offset = fragp->fr_offset;
14937 /* Force misaligned offsets to 32-bit variant. */
14938 if (offset & low)
14939 return -4;
14940 if (offset & ~mask)
14941 return 4;
14942 return 2;
14943 }
14944
14945 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14946 load. */
14947 static int
14948 relax_adr (fragS *fragp, asection *sec)
14949 {
14950 addressT addr;
14951 offsetT val;
14952
14953 /* Assume worst case for symbols not known to be in the same section. */
14954 if (!S_IS_DEFINED(fragp->fr_symbol)
14955 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14956 return 4;
14957
14958 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14959 addr = fragp->fr_address + fragp->fr_fix;
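  /* The base address for Thumb ADR and PC-relative loads is Align (PC, 4),
     i.e. the instruction address plus 4, rounded down to a multiple of 4.  */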
14960 addr = (addr + 4) & ~3;
14961 /* Fix the insn as the 4-byte version if the target address is not
14962 sufficiently aligned. This prevents an infinite loop when two
14963 instructions have contradictory range/alignment requirements. */
14964 if (val & 3)
14965 return -4;
14966 val -= addr;
14967 if (val < 0 || val > 1020)
14968 return 4;
14969 return 2;
14970 }
14971
14972 /* Return the size of a relaxable add/sub immediate instruction. */
14973 static int
14974 relax_addsub (fragS *fragp, asection *sec)
14975 {
14976 char *buf;
14977 int op;
14978
14979 buf = fragp->fr_literal + fragp->fr_fix;
14980 op = bfd_get_16(sec->owner, buf);
14981 if ((op & 0xf) == ((op >> 4) & 0xf))
14982 return relax_immediate (fragp, 8, 0);
14983 else
14984 return relax_immediate (fragp, 3, 0);
14985 }
14986
14987
14988 /* Return the size of a relaxable branch instruction. BITS is the
14989 size of the offset field in the narrow instruction. */
14990
14991 static int
14992 relax_branch (fragS *fragp, asection *sec, int bits)
14993 {
14994 addressT addr;
14995 offsetT val;
14996 offsetT limit;
14997
14998 /* Assume worst case for symbols not known to be in the same section. */
14999 if (!S_IS_DEFINED(fragp->fr_symbol)
15000 || sec != S_GET_SEGMENT (fragp->fr_symbol))
15001 return 4;
15002
15003 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
15004 addr = fragp->fr_address + fragp->fr_fix + 4;
15005 val -= addr;
15006
15007 /* The offset field holds a signed value in units of 2 bytes (halfwords),
so BITS bits reach roughly +/- (1 << bits) bytes. */
15008 limit = 1 << bits;
15009 if (val >= limit || val < -limit)
15010 return 4;
15011 return 2;
15012 }
15013
15014
15015 /* Relax a machine dependent frag. This returns the amount by which
15016 the current size of the frag should change. */
15017
15018 int
15019 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
15020 {
15021 int oldsize;
15022 int newsize;
15023
15024 oldsize = fragp->fr_var;
15025 switch (fragp->fr_subtype)
15026 {
15027 case T_MNEM_ldr_pc2:
15028 newsize = relax_adr(fragp, sec);
15029 break;
15030 case T_MNEM_ldr_pc:
15031 case T_MNEM_ldr_sp:
15032 case T_MNEM_str_sp:
15033 newsize = relax_immediate(fragp, 8, 2);
15034 break;
15035 case T_MNEM_ldr:
15036 case T_MNEM_str:
15037 newsize = relax_immediate(fragp, 5, 2);
15038 break;
15039 case T_MNEM_ldrh:
15040 case T_MNEM_strh:
15041 newsize = relax_immediate(fragp, 5, 1);
15042 break;
15043 case T_MNEM_ldrb:
15044 case T_MNEM_strb:
15045 newsize = relax_immediate(fragp, 5, 0);
15046 break;
15047 case T_MNEM_adr:
15048 newsize = relax_adr(fragp, sec);
15049 break;
15050 case T_MNEM_mov:
15051 case T_MNEM_movs:
15052 case T_MNEM_cmp:
15053 case T_MNEM_cmn:
15054 newsize = relax_immediate(fragp, 8, 0);
15055 break;
15056 case T_MNEM_b:
15057 newsize = relax_branch(fragp, sec, 11);
15058 break;
15059 case T_MNEM_bcond:
15060 newsize = relax_branch(fragp, sec, 8);
15061 break;
15062 case T_MNEM_add_sp:
15063 case T_MNEM_add_pc:
15064 newsize = relax_immediate (fragp, 8, 2);
15065 break;
15066 case T_MNEM_inc_sp:
15067 case T_MNEM_dec_sp:
15068 newsize = relax_immediate (fragp, 7, 2);
15069 break;
15070 case T_MNEM_addi:
15071 case T_MNEM_addis:
15072 case T_MNEM_subi:
15073 case T_MNEM_subis:
15074 newsize = relax_addsub (fragp, sec);
15075 break;
15076 default:
15077 abort();
15078 }
15079 if (newsize < 0)
15080 {
15081 fragp->fr_var = -newsize;
15082 md_convert_frag (sec->owner, sec, fragp);
15083 frag_wane(fragp);
15084 return -(newsize + oldsize);
15085 }
15086 fragp->fr_var = newsize;
15087 return newsize - oldsize;
15088 }
15089
15090 /* Round up a section size to the appropriate boundary. */
15091
15092 valueT
15093 md_section_align (segT segment ATTRIBUTE_UNUSED,
15094 valueT size)
15095 {
15096 #ifdef OBJ_ELF
15097 return size;
15098 #else
15099 /* Round all sects to multiple of 4. */
15100 return (size + 3) & ~3;
15101 #endif
15102 }
15103
15104 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
15105 of an rs_align_code fragment. */
15106
15107 void
15108 arm_handle_align (fragS * fragP)
15109 {
15110 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
15111 static char const thumb_noop[2] = { 0xc0, 0x46 };
15112 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
15113 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
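  /* 0xe1a00000 is "mov r0, r0" (the canonical ARM NOP) and 0x46c0 is
     "mov r8, r8" (the canonical Thumb NOP); the arrays above hold the
     little- and big-endian byte orderings of each.  */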
15114
15115 int bytes, fix, noop_size;
15116 char * p;
15117 const char * noop;
15118
15119 if (fragP->fr_type != rs_align_code)
15120 return;
15121
15122 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
15123 p = fragP->fr_literal + fragP->fr_fix;
15124 fix = 0;
15125
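  /* MAX_MEM_FOR_RS_ALIGN_CODE is assumed to be of the form 2^n - 1
     (currently 31 for ARM), so the mask below keeps only the low-order
     bits.  */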
15126 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
15127 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
15128
15129 if (fragP->tc_frag_data)
15130 {
15131 if (target_big_endian)
15132 noop = thumb_bigend_noop;
15133 else
15134 noop = thumb_noop;
15135 noop_size = sizeof (thumb_noop);
15136 }
15137 else
15138 {
15139 if (target_big_endian)
15140 noop = arm_bigend_noop;
15141 else
15142 noop = arm_noop;
15143 noop_size = sizeof (arm_noop);
15144 }
15145
15146 if (bytes & (noop_size - 1))
15147 {
15148 fix = bytes & (noop_size - 1);
15149 memset (p, 0, fix);
15150 p += fix;
15151 bytes -= fix;
15152 }
15153
15154 while (bytes >= noop_size)
15155 {
15156 memcpy (p, noop, noop_size);
15157 p += noop_size;
15158 bytes -= noop_size;
15159 fix += noop_size;
15160 }
15161
15162 fragP->fr_fix += fix;
15163 fragP->fr_var = noop_size;
15164 }
15165
15166 /* Called from md_do_align. Used to create an alignment
15167 frag in a code section. */
15168
15169 void
15170 arm_frag_align_code (int n, int max)
15171 {
15172 char * p;
15173
15174 /* We assume that there will never be a requirement
15175 to support alignments greater than 32 bytes. */
15176 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15177 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15178
15179 p = frag_var (rs_align_code,
15180 MAX_MEM_FOR_RS_ALIGN_CODE,
15181 1,
15182 (relax_substateT) max,
15183 (symbolS *) NULL,
15184 (offsetT) n,
15185 (char *) NULL);
15186 *p = 0;
15187 }
15188
15189 /* Perform target specific initialisation of a frag. */
15190
15191 void
15192 arm_init_frag (fragS * fragP)
15193 {
15194 /* Record whether this frag is in an ARM or a THUMB area. */
15195 fragP->tc_frag_data = thumb_mode;
15196 }
15197
15198 #ifdef OBJ_ELF
15199 /* When we change sections we need to issue a new mapping symbol. */
15200
15201 void
15202 arm_elf_change_section (void)
15203 {
15204 flagword flags;
15205 segment_info_type *seginfo;
15206
15207 /* Link an unlinked unwind index table section to the .text section. */
15208 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15209 && elf_linked_to_section (now_seg) == NULL)
15210 elf_linked_to_section (now_seg) = text_section;
15211
15212 if (!SEG_NORMAL (now_seg))
15213 return;
15214
15215 flags = bfd_get_section_flags (stdoutput, now_seg);
15216
15217 /* We can ignore sections that only contain debug info. */
15218 if ((flags & SEC_ALLOC) == 0)
15219 return;
15220
15221 seginfo = seg_info (now_seg);
15222 mapstate = seginfo->tc_segment_info_data.mapstate;
15223 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15224 }
15225
15226 int
15227 arm_elf_section_type (const char * str, size_t len)
15228 {
15229 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15230 return SHT_ARM_EXIDX;
15231
15232 return -1;
15233 }
15234 \f
15235 /* Code to deal with unwinding tables. */
15236
15237 static void add_unwind_adjustsp (offsetT);
15238
15239 /* Generate any deferred unwind frame offset. */
15240
15241 static void
15242 flush_pending_unwind (void)
15243 {
15244 offsetT offset;
15245
15246 offset = unwind.pending_offset;
15247 unwind.pending_offset = 0;
15248 if (offset != 0)
15249 add_unwind_adjustsp (offset);
15250 }
15251
15252 /* Add an opcode to this list for this function. Two-byte opcodes should
15253 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15254 order. */
15255
15256 static void
15257 add_unwind_opcode (valueT op, int length)
15258 {
15259 /* Add any deferred stack adjustment. */
15260 if (unwind.pending_offset)
15261 flush_pending_unwind ();
15262
15263 unwind.sp_restored = 0;
15264
15265 if (unwind.opcode_count + length > unwind.opcode_alloc)
15266 {
15267 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15268 if (unwind.opcodes)
15269 unwind.opcodes = xrealloc (unwind.opcodes,
15270 unwind.opcode_alloc);
15271 else
15272 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15273 }
15274 while (length > 0)
15275 {
15276 length--;
15277 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15278 op >>= 8;
15279 unwind.opcode_count++;
15280 }
15281 }
15282
15283 /* Add unwind opcodes to adjust the stack pointer. */
15284
15285 static void
15286 add_unwind_adjustsp (offsetT offset)
15287 {
15288 valueT op;
15289
15290 if (offset > 0x200)
15291 {
15292 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15293 char bytes[5];
15294 int n;
15295 valueT o;
15296
15297 /* Long form: 0xb2, uleb128. */
15298 /* This might not fit in a word so add the individual bytes,
15299 remembering the list is built in reverse order. */
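      /* Per the ARM EHABI, opcode 0xb2 means vsp += 0x204 + (uleb128 << 2);
         e.g. an adjustment of 0x208 bytes is encoded as 0xb2 0x01.  */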
15300 o = (valueT) ((offset - 0x204) >> 2);
15301 if (o == 0)
15302 add_unwind_opcode (0, 1);
15303
15304 /* Calculate the uleb128 encoding of the offset. */
15305 n = 0;
15306 while (o)
15307 {
15308 bytes[n] = o & 0x7f;
15309 o >>= 7;
15310 if (o)
15311 bytes[n] |= 0x80;
15312 n++;
15313 }
15314 /* Add the insn. */
15315 for (; n; n--)
15316 add_unwind_opcode (bytes[n - 1], 1);
15317 add_unwind_opcode (0xb2, 1);
15318 }
15319 else if (offset > 0x100)
15320 {
15321 /* Two short opcodes. */
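      /* A single short opcode (0x00-0x3f: vsp += (op & 0x3f) * 4 + 4) can add
         at most 0x100 bytes, so emit 0x3f first and put the remainder in the
         second opcode.  */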
15322 add_unwind_opcode (0x3f, 1);
15323 op = (offset - 0x104) >> 2;
15324 add_unwind_opcode (op, 1);
15325 }
15326 else if (offset > 0)
15327 {
15328 /* Short opcode. */
15329 op = (offset - 4) >> 2;
15330 add_unwind_opcode (op, 1);
15331 }
15332 else if (offset < 0)
15333 {
15334 offset = -offset;
15335 while (offset > 0x100)
15336 {
15337 add_unwind_opcode (0x7f, 1);
15338 offset -= 0x100;
15339 }
15340 op = ((offset - 4) >> 2) | 0x40;
15341 add_unwind_opcode (op, 1);
15342 }
15343 }
15344
15345 /* Finish the list of unwind opcodes for this function. */
15346 static void
15347 finish_unwind_opcodes (void)
15348 {
15349 valueT op;
15350
15351 if (unwind.fp_used)
15352 {
15353 /* Adjust sp as necessary. */
15354 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15355 flush_pending_unwind ();
15356
15357 /* Emit the opcode that restores sp from the frame pointer register
(EHABI opcode 0x90 | reg). */
15358 op = 0x90 | unwind.fp_reg;
15359 add_unwind_opcode (op, 1);
15360 }
15361 else
15362 flush_pending_unwind ();
15363 }
15364
15365
15366 /* Start an exception table entry. If idx is nonzero this is an index table
15367 entry. */
15368
15369 static void
15370 start_unwind_section (const segT text_seg, int idx)
15371 {
15372 const char * text_name;
15373 const char * prefix;
15374 const char * prefix_once;
15375 const char * group_name;
15376 size_t prefix_len;
15377 size_t text_len;
15378 char * sec_name;
15379 size_t sec_name_len;
15380 int type;
15381 int flags;
15382 int linkonce;
15383
15384 if (idx)
15385 {
15386 prefix = ELF_STRING_ARM_unwind;
15387 prefix_once = ELF_STRING_ARM_unwind_once;
15388 type = SHT_ARM_EXIDX;
15389 }
15390 else
15391 {
15392 prefix = ELF_STRING_ARM_unwind_info;
15393 prefix_once = ELF_STRING_ARM_unwind_info_once;
15394 type = SHT_PROGBITS;
15395 }
15396
15397 text_name = segment_name (text_seg);
15398 if (streq (text_name, ".text"))
15399 text_name = "";
15400
15401 if (strncmp (text_name, ".gnu.linkonce.t.",
15402 strlen (".gnu.linkonce.t.")) == 0)
15403 {
15404 prefix = prefix_once;
15405 text_name += strlen (".gnu.linkonce.t.");
15406 }
15407
15408 prefix_len = strlen (prefix);
15409 text_len = strlen (text_name);
15410 sec_name_len = prefix_len + text_len;
15411 sec_name = xmalloc (sec_name_len + 1);
15412 memcpy (sec_name, prefix, prefix_len);
15413 memcpy (sec_name + prefix_len, text_name, text_len);
15414 sec_name[prefix_len + text_len] = '\0';
15415
15416 flags = SHF_ALLOC;
15417 linkonce = 0;
15418 group_name = 0;
15419
15420 /* Handle COMDAT group. */
15421 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15422 {
15423 group_name = elf_group_name (text_seg);
15424 if (group_name == NULL)
15425 {
15426 as_bad ("Group section `%s' has no group signature",
15427 segment_name (text_seg));
15428 ignore_rest_of_line ();
15429 return;
15430 }
15431 flags |= SHF_GROUP;
15432 linkonce = 1;
15433 }
15434
15435 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15436
15437 /* Set the section link for index tables. */
15438 if (idx)
15439 elf_linked_to_section (now_seg) = text_seg;
15440 }
15441
15442
15443 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15444 personality routine data. Returns zero, or the index table value for
15445 an inline entry. */
15446
15447 static valueT
15448 create_unwind_entry (int have_data)
15449 {
15450 int size;
15451 addressT where;
15452 char *ptr;
15453 /* The current word of data. */
15454 valueT data;
15455 /* The number of bytes left in this word. */
15456 int n;
15457
15458 finish_unwind_opcodes ();
15459
15460 /* Remember the current text section. */
15461 unwind.saved_seg = now_seg;
15462 unwind.saved_subseg = now_subseg;
15463
15464 start_unwind_section (now_seg, 0);
15465
15466 if (unwind.personality_routine == NULL)
15467 {
15468 if (unwind.personality_index == -2)
15469 {
15470 if (have_data)
15471 as_bad (_("handerdata in cantunwind frame"));
15472 return 1; /* EXIDX_CANTUNWIND. */
15473 }
15474
15475 /* Use a default personality routine if none is specified. */
15476 if (unwind.personality_index == -1)
15477 {
15478 if (unwind.opcode_count > 3)
15479 unwind.personality_index = 1;
15480 else
15481 unwind.personality_index = 0;
15482 }
15483
15484 /* Space for the personality routine entry. */
15485 if (unwind.personality_index == 0)
15486 {
15487 if (unwind.opcode_count > 3)
15488 as_bad (_("too many unwind opcodes for personality routine 0"));
15489
15490 if (!have_data)
15491 {
15492 /* All the data is inline in the index table. */
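              /* Bit 31 set marks a compact (inline) index table entry using
                 personality routine 0; up to three unwind opcode bytes
                 follow in the same word.  */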
15493 data = 0x80;
15494 n = 3;
15495 while (unwind.opcode_count > 0)
15496 {
15497 unwind.opcode_count--;
15498 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15499 n--;
15500 }
15501
15502 /* Pad with "finish" opcodes. */
15503 while (n--)
15504 data = (data << 8) | 0xb0;
15505
15506 return data;
15507 }
15508 size = 0;
15509 }
15510 else
15511 /* We get two opcodes "free" in the first word. */
15512 size = unwind.opcode_count - 2;
15513 }
15514 else
15515 /* An extra byte is required for the opcode count. */
15516 size = unwind.opcode_count + 1;
15517
15518 size = (size + 3) >> 2;
15519 if (size > 0xff)
15520 as_bad (_("too many unwind opcodes"));
15521
15522 frag_align (2, 0, 0);
15523 record_alignment (now_seg, 2);
15524 unwind.table_entry = expr_build_dot ();
15525
15526 /* Allocate the table entry. */
15527 ptr = frag_more ((size << 2) + 4);
15528 where = frag_now_fix () - ((size << 2) + 4);
15529
15530 switch (unwind.personality_index)
15531 {
15532 case -1:
15533 /* ??? Should this be a PLT generating relocation? */
15534 /* Custom personality routine. */
15535 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15536 BFD_RELOC_ARM_PREL31);
15537
15538 where += 4;
15539 ptr += 4;
15540
15541 /* Set the first byte to the number of additional words. */
15542 data = size - 1;
15543 n = 3;
15544 break;
15545
15546 /* ABI defined personality routines. */
15547 case 0:
15548 /* Three opcode bytes are packed into the first word. */
15549 data = 0x80;
15550 n = 3;
15551 break;
15552
15553 case 1:
15554 case 2:
15555 /* The size and first two opcode bytes go in the first word. */
15556 data = ((0x80 + unwind.personality_index) << 8) | size;
15557 n = 2;
15558 break;
15559
15560 default:
15561 /* Should never happen. */
15562 abort ();
15563 }
15564
15565 /* Pack the opcodes into words (MSB first), reversing the list at the same
15566 time. */
15567 while (unwind.opcode_count > 0)
15568 {
15569 if (n == 0)
15570 {
15571 md_number_to_chars (ptr, data, 4);
15572 ptr += 4;
15573 n = 4;
15574 data = 0;
15575 }
15576 unwind.opcode_count--;
15577 n--;
15578 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15579 }
15580
15581 /* Finish off the last word. */
15582 if (n < 4)
15583 {
15584 /* Pad with "finish" opcodes. */
15585 while (n--)
15586 data = (data << 8) | 0xb0;
15587
15588 md_number_to_chars (ptr, data, 4);
15589 }
15590
15591 if (!have_data)
15592 {
15593 /* Add an empty descriptor if there is no user-specified data. */
15594 ptr = frag_more (4);
15595 md_number_to_chars (ptr, 0, 4);
15596 }
15597
15598 return 0;
15599 }
15600
15601 /* Convert REGNAME to a DWARF-2 register number. */
15602
15603 int
15604 tc_arm_regname_to_dw2regnum (const char *regname)
15605 {
15606 int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN);
15607
15608 if (reg == FAIL)
15609 return -1;
15610
15611 return reg;
15612 }
15613
15614 /* Initialize the DWARF-2 unwind information for this procedure. */
15615
15616 void
15617 tc_arm_frame_initial_instructions (void)
15618 {
15619 cfi_add_CFA_def_cfa (REG_SP, 0);
15620 }
15621 #endif /* OBJ_ELF */
15622
15623
15624 /* MD interface: Symbol and relocation handling. */
15625
15626 /* Return the address within the segment that a PC-relative fixup is
15627 relative to. For ARM, PC-relative fixups applied to instructions
15628 are generally relative to the location of the fixup plus 8 bytes.
15629 Thumb branches are offset by 4, and Thumb loads relative to PC
15630 require special handling. */
15631
15632 long
15633 md_pcrel_from_section (fixS * fixP, segT seg)
15634 {
15635 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15636
15637 /* If this is pc-relative and we are going to emit a relocation
15638 then we just want to put out any pipeline compensation that the linker
15639 will need. Otherwise we want to use the calculated base.
15640 For WinCE we skip the bias for externals as well, since this
15641 is how the MS ARM-CE assembler behaves and we want to be compatible. */
15642 if (fixP->fx_pcrel
15643 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15644 || (arm_force_relocation (fixP)
15645 #ifdef TE_WINCE
15646 && !S_IS_EXTERNAL (fixP->fx_addsy)
15647 #endif
15648 )))
15649 base = 0;
15650
15651 switch (fixP->fx_r_type)
15652 {
15653 /* PC relative addressing on the Thumb is slightly odd as the
15654 bottom two bits of the PC are forced to zero for the
15655 calculation. This happens *after* application of the
15656 pipeline offset. However, Thumb adrl already adjusts for
15657 this, so we need not do it again. */
15658 case BFD_RELOC_ARM_THUMB_ADD:
15659 return base & ~3;
15660
15661 case BFD_RELOC_ARM_THUMB_OFFSET:
15662 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15663 case BFD_RELOC_ARM_T32_ADD_PC12:
15664 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15665 return (base + 4) & ~3;
15666
15667 /* Thumb branches are simply offset by +4. */
15668 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15669 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15670 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15671 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15672 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15673 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15674 case BFD_RELOC_THUMB_PCREL_BLX:
15675 return base + 4;
15676
15677 /* ARM mode branches are offset by +8. However, the Windows CE
15678 loader expects the relocation not to take this into account. */
15679 case BFD_RELOC_ARM_PCREL_BRANCH:
15680 case BFD_RELOC_ARM_PCREL_CALL:
15681 case BFD_RELOC_ARM_PCREL_JUMP:
15682 case BFD_RELOC_ARM_PCREL_BLX:
15683 case BFD_RELOC_ARM_PLT32:
15684 #ifdef TE_WINCE
15685 /* When handling fixups immediately (because we have already
15686 discovered the value of a symbol, or the address of the frag involved),
15687 we must account for the +8 offset ourselves, as the OS loader will never
15688 see the reloc; see fixup_segment() in write.c.
15689 The S_IS_EXTERNAL test handles the case of global symbols.
15690 Those need the calculated base, not just the pipeline compensation the linker will apply. */
15691 if (fixP->fx_pcrel
15692 && fixP->fx_addsy != NULL
15693 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
15694 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
15695 return base + 8;
15696 return base;
15697 #else
15698 return base + 8;
15699 #endif
15700
15701 /* ARM mode loads relative to PC are also offset by +8. Unlike
15702 branches, the Windows CE loader *does* expect the relocation
15703 to take this into account. */
15704 case BFD_RELOC_ARM_OFFSET_IMM:
15705 case BFD_RELOC_ARM_OFFSET_IMM8:
15706 case BFD_RELOC_ARM_HWLITERAL:
15707 case BFD_RELOC_ARM_LITERAL:
15708 case BFD_RELOC_ARM_CP_OFF_IMM:
15709 return base + 8;
15710
15711
15712 /* Other PC-relative relocations are un-offset. */
15713 default:
15714 return base;
15715 }
15716 }
15717
15718 /* Under ELF we need to provide a default value for the _GLOBAL_OFFSET_TABLE_
15719 symbol. Otherwise we have no need to provide default values for symbols. */
15720
15721 symbolS *
15722 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15723 {
15724 #ifdef OBJ_ELF
15725 if (name[0] == '_' && name[1] == 'G'
15726 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15727 {
15728 if (!GOT_symbol)
15729 {
15730 if (symbol_find (name))
15731 as_bad ("GOT already in the symbol table");
15732
15733 GOT_symbol = symbol_new (name, undefined_section,
15734 (valueT) 0, & zero_address_frag);
15735 }
15736
15737 return GOT_symbol;
15738 }
15739 #endif
15740
15741 return 0;
15742 }
15743
15744 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15745 computed as two separate immediate values, added together. We
15746 already know that this value cannot be computed by just one ARM
15747 instruction. */
15748
15749 static unsigned int
15750 validate_immediate_twopart (unsigned int val,
15751 unsigned int * highpart)
15752 {
15753 unsigned int a;
15754 unsigned int i;
15755
15756 for (i = 0; i < 32; i += 2)
15757 if (((a = rotate_left (val, i)) & 0xff) != 0)
15758 {
15759 if (a & 0xff00)
15760 {
15761 if (a & ~ 0xffff)
15762 continue;
15763 * highpart = (a >> 8) | ((i + 24) << 7);
15764 }
15765 else if (a & 0xff0000)
15766 {
15767 if (a & 0xff000000)
15768 continue;
15769 * highpart = (a >> 16) | ((i + 16) << 7);
15770 }
15771 else
15772 {
15773 assert (a & 0xff000000);
15774 * highpart = (a >> 24) | ((i + 8) << 7);
15775 }
15776
15777 return (a & 0xff) | (i << 7);
15778 }
15779
15780 return FAIL;
15781 }
15782
15783 static int
15784 validate_offset_imm (unsigned int val, int hwse)
15785 {
15786 if ((hwse && val > 255) || val > 4095)
15787 return FAIL;
15788 return val;
15789 }
15790
15791 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15792 negative immediate constant by altering the instruction. A bit of
15793 a hack really.
15794 MOV <-> MVN
15795 AND <-> BIC
15796 ADC <-> SBC
15797 by inverting the second operand, and
15798 ADD <-> SUB
15799 CMP <-> CMN
15800 by negating the second operand. */
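/* For example, a fixup value of -1 that cannot be encoded for MOV is handled
   by rewriting the instruction as MVN with immediate 0.  */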
15801
15802 static int
15803 negate_data_op (unsigned long * instruction,
15804 unsigned long value)
15805 {
15806 int op, new_inst;
15807 unsigned long negated, inverted;
15808
15809 negated = encode_arm_immediate (-value);
15810 inverted = encode_arm_immediate (~value);
15811
15812 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15813 switch (op)
15814 {
15815 /* First negates. */
15816 case OPCODE_SUB: /* ADD <-> SUB */
15817 new_inst = OPCODE_ADD;
15818 value = negated;
15819 break;
15820
15821 case OPCODE_ADD:
15822 new_inst = OPCODE_SUB;
15823 value = negated;
15824 break;
15825
15826 case OPCODE_CMP: /* CMP <-> CMN */
15827 new_inst = OPCODE_CMN;
15828 value = negated;
15829 break;
15830
15831 case OPCODE_CMN:
15832 new_inst = OPCODE_CMP;
15833 value = negated;
15834 break;
15835
15836 /* Now Inverted ops. */
15837 case OPCODE_MOV: /* MOV <-> MVN */
15838 new_inst = OPCODE_MVN;
15839 value = inverted;
15840 break;
15841
15842 case OPCODE_MVN:
15843 new_inst = OPCODE_MOV;
15844 value = inverted;
15845 break;
15846
15847 case OPCODE_AND: /* AND <-> BIC */
15848 new_inst = OPCODE_BIC;
15849 value = inverted;
15850 break;
15851
15852 case OPCODE_BIC:
15853 new_inst = OPCODE_AND;
15854 value = inverted;
15855 break;
15856
15857 case OPCODE_ADC: /* ADC <-> SBC */
15858 new_inst = OPCODE_SBC;
15859 value = inverted;
15860 break;
15861
15862 case OPCODE_SBC:
15863 new_inst = OPCODE_ADC;
15864 value = inverted;
15865 break;
15866
15867 /* We cannot do anything. */
15868 default:
15869 return FAIL;
15870 }
15871
15872 if (value == (unsigned) FAIL)
15873 return FAIL;
15874
15875 *instruction &= OPCODE_MASK;
15876 *instruction |= new_inst << DATA_OP_SHIFT;
15877 return value;
15878 }
15879
15880 /* Like negate_data_op, but for Thumb-2. */
15881
15882 static unsigned int
15883 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15884 {
15885 int op, new_inst;
15886 int rd;
15887 offsetT negated, inverted;
15888
15889 negated = encode_thumb32_immediate (-value);
15890 inverted = encode_thumb32_immediate (~value);
15891
15892 rd = (*instruction >> 8) & 0xf;
15893 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15894 switch (op)
15895 {
15896 /* ADD <-> SUB. Includes CMP <-> CMN. */
15897 case T2_OPCODE_SUB:
15898 new_inst = T2_OPCODE_ADD;
15899 value = negated;
15900 break;
15901
15902 case T2_OPCODE_ADD:
15903 new_inst = T2_OPCODE_SUB;
15904 value = negated;
15905 break;
15906
15907 /* ORR <-> ORN. Includes MOV <-> MVN. */
15908 case T2_OPCODE_ORR:
15909 new_inst = T2_OPCODE_ORN;
15910 value = inverted;
15911 break;
15912
15913 case T2_OPCODE_ORN:
15914 new_inst = T2_OPCODE_ORR;
15915 value = inverted;
15916 break;
15917
15918 /* AND <-> BIC. TST has no inverted equivalent. */
15919 case T2_OPCODE_AND:
15920 new_inst = T2_OPCODE_BIC;
15921 if (rd == 15)
15922 value = FAIL;
15923 else
15924 value = inverted;
15925 break;
15926
15927 case T2_OPCODE_BIC:
15928 new_inst = T2_OPCODE_AND;
15929 value = inverted;
15930 break;
15931
15932 /* ADC <-> SBC */
15933 case T2_OPCODE_ADC:
15934 new_inst = T2_OPCODE_SBC;
15935 value = inverted;
15936 break;
15937
15938 case T2_OPCODE_SBC:
15939 new_inst = T2_OPCODE_ADC;
15940 value = inverted;
15941 break;
15942
15943 /* We cannot do anything. */
15944 default:
15945 return FAIL;
15946 }
15947
15948 if (value == FAIL)
15949 return FAIL;
15950
15951 *instruction &= T2_OPCODE_MASK;
15952 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15953 return value;
15954 }
15955
15956 /* Read a 32-bit thumb instruction from buf. */
15957 static unsigned long
15958 get_thumb32_insn (char * buf)
15959 {
15960 unsigned long insn;
15961 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15962 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15963
15964 return insn;
15965 }
15966
15967
15968 /* We usually want to set the low bit on the address of thumb function
15969 symbols. In particular .word foo - . should have the low bit set.
15970 Generic code tries to fold the difference of two symbols to
15971 a constant. Prevent this and force a relocation when the first symbol
15972 is a Thumb function. */
15973 int
15974 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15975 {
15976 if (op == O_subtract
15977 && l->X_op == O_symbol
15978 && r->X_op == O_symbol
15979 && THUMB_IS_FUNC (l->X_add_symbol))
15980 {
15981 l->X_op = O_subtract;
15982 l->X_op_symbol = r->X_add_symbol;
15983 l->X_add_number -= r->X_add_number;
15984 return 1;
15985 }
15986 /* Process as normal. */
15987 return 0;
15988 }
15989
15990 void
15991 md_apply_fix (fixS * fixP,
15992 valueT * valP,
15993 segT seg)
15994 {
15995 offsetT value = * valP;
15996 offsetT newval;
15997 unsigned int newimm;
15998 unsigned long temp;
15999 int sign;
16000 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
16001
16002 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
16003
16004 /* Note whether this will delete the relocation. */
16005 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
16006 fixP->fx_done = 1;
16007
16008 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16009 consistency with the behavior on 32-bit hosts. Remember value
16010 for emit_reloc. */
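  /* The AND / XOR / SUB sequence below sign-extends bit 31: for any v in
     [0, 2^32), ((v ^ 0x80000000) - 0x80000000) yields the corresponding
     signed 32-bit value.  */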
16011 value &= 0xffffffff;
16012 value ^= 0x80000000;
16013 value -= 0x80000000;
16014
16015 *valP = value;
16016 fixP->fx_addnumber = value;
16017
16018 /* Same treatment for fixP->fx_offset. */
16019 fixP->fx_offset &= 0xffffffff;
16020 fixP->fx_offset ^= 0x80000000;
16021 fixP->fx_offset -= 0x80000000;
16022
16023 switch (fixP->fx_r_type)
16024 {
16025 case BFD_RELOC_NONE:
16026 /* This will need to go in the object file. */
16027 fixP->fx_done = 0;
16028 break;
16029
16030 case BFD_RELOC_ARM_IMMEDIATE:
16031 /* We claim that this fixup has been processed here,
16032 even if in fact we generate an error because we do
16033 not have a reloc for it, so tc_gen_reloc will reject it. */
16034 fixP->fx_done = 1;
16035
16036 if (fixP->fx_addsy
16037 && ! S_IS_DEFINED (fixP->fx_addsy))
16038 {
16039 as_bad_where (fixP->fx_file, fixP->fx_line,
16040 _("undefined symbol %s used as an immediate value"),
16041 S_GET_NAME (fixP->fx_addsy));
16042 break;
16043 }
16044
16045 newimm = encode_arm_immediate (value);
16046 temp = md_chars_to_number (buf, INSN_SIZE);
16047
16048 /* If the instruction will fail, see if we can fix things up by
16049 changing the opcode. */
16050 if (newimm == (unsigned int) FAIL
16051 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
16052 {
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("invalid constant (%lx) after fixup"),
16055 (unsigned long) value);
16056 break;
16057 }
16058
16059 newimm |= (temp & 0xfffff000);
16060 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16061 break;
16062
16063 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16064 {
16065 unsigned int highpart = 0;
16066 unsigned int newinsn = 0xe1a00000; /* nop. */
16067
16068 newimm = encode_arm_immediate (value);
16069 temp = md_chars_to_number (buf, INSN_SIZE);
16070
16071 /* If the instruction will fail, see if we can fix things up by
16072 changing the opcode. */
16073 if (newimm == (unsigned int) FAIL
16074 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
16075 {
16076 /* No ? OK - try using two ADD instructions to generate
16077 the value. */
16078 newimm = validate_immediate_twopart (value, & highpart);
16079
16080 /* Yes - then make sure that the second instruction is
16081 also an add. */
16082 if (newimm != (unsigned int) FAIL)
16083 newinsn = temp;
16084 /* Still No ? Try using a negated value. */
16085 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
16086 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
16087 /* Otherwise - give up. */
16088 else
16089 {
16090 as_bad_where (fixP->fx_file, fixP->fx_line,
16091 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
16092 (long) value);
16093 break;
16094 }
16095
16096 /* Replace the first operand in the 2nd instruction (which
16097 is the PC) with the destination register. We have
16098 already added in the PC in the first instruction and we
16099 do not want to do it again. */
16100 newinsn &= ~ 0xf0000;
16101 newinsn |= ((newinsn & 0x0f000) << 4);
16102 }
16103
16104 newimm |= (temp & 0xfffff000);
16105 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16106
16107 highpart |= (newinsn & 0xfffff000);
16108 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
16109 }
16110 break;
16111
16112 case BFD_RELOC_ARM_OFFSET_IMM:
16113 if (!fixP->fx_done && seg->use_rela_p)
16114 value = 0;
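      /* Fall through.  */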
16115
16116 case BFD_RELOC_ARM_LITERAL:
16117 sign = value >= 0;
16118
16119 if (value < 0)
16120 value = - value;
16121
16122 if (validate_offset_imm (value, 0) == FAIL)
16123 {
16124 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
16125 as_bad_where (fixP->fx_file, fixP->fx_line,
16126 _("invalid literal constant: pool needs to be closer"));
16127 else
16128 as_bad_where (fixP->fx_file, fixP->fx_line,
16129 _("bad immediate value for offset (%ld)"),
16130 (long) value);
16131 break;
16132 }
16133
16134 newval = md_chars_to_number (buf, INSN_SIZE);
16135 newval &= 0xff7ff000;
16136 newval |= value | (sign ? INDEX_UP : 0);
16137 md_number_to_chars (buf, newval, INSN_SIZE);
16138 break;
16139
16140 case BFD_RELOC_ARM_OFFSET_IMM8:
16141 case BFD_RELOC_ARM_HWLITERAL:
16142 sign = value >= 0;
16143
16144 if (value < 0)
16145 value = - value;
16146
16147 if (validate_offset_imm (value, 1) == FAIL)
16148 {
16149 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
16150 as_bad_where (fixP->fx_file, fixP->fx_line,
16151 _("invalid literal constant: pool needs to be closer"));
16152 else
16153 as_bad (_("bad immediate value for half-word offset (%ld)"),
16154 (long) value);
16155 break;
16156 }
16157
16158 newval = md_chars_to_number (buf, INSN_SIZE);
16159 newval &= 0xff7ff0f0;
16160 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16161 md_number_to_chars (buf, newval, INSN_SIZE);
16162 break;
16163
16164 case BFD_RELOC_ARM_T32_OFFSET_U8:
16165 if (value < 0 || value > 1020 || value % 4 != 0)
16166 as_bad_where (fixP->fx_file, fixP->fx_line,
16167 _("bad immediate value for offset (%ld)"), (long) value);
16168 value /= 4;
16169
16170 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16171 newval |= value;
16172 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16173 break;
16174
16175 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16176 /* This is a complicated relocation used for all varieties of Thumb32
16177 load/store instruction with immediate offset:
16178
16179 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16180 *4, optional writeback(W)
16181 (doubleword load/store)
16182
16183 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16184 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16185 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16186 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16187 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16188
16189 Uppercase letters indicate bits that are already encoded at
16190 this point. Lowercase letters are our problem. For the
16191 second block of instructions, the secondary opcode nybble
16192 (bits 8..11) is present, and bit 23 is zero, even if this is
16193 a PC-relative operation. */
16194 newval = md_chars_to_number (buf, THUMB_SIZE);
16195 newval <<= 16;
16196 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
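/* Once combined, NEWVAL holds the first halfword in bits 16..31 and the
   second halfword in bits 0..15.  For example, the (1 << 23) used below
   is bit 7 of the first halfword (the 'u' add/subtract bit of the
   doubleword and 12-bit forms above), while (1 << 9) is bit 9 of the
   second halfword. */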
16197
16198 if ((newval & 0xf0000000) == 0xe0000000)
16199 {
16200 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16201 if (value >= 0)
16202 newval |= (1 << 23);
16203 else
16204 value = -value;
16205 if (value % 4 != 0)
16206 {
16207 as_bad_where (fixP->fx_file, fixP->fx_line,
16208 _("offset not a multiple of 4"));
16209 break;
16210 }
16211 value /= 4;
16212 if (value > 0xff)
16213 {
16214 as_bad_where (fixP->fx_file, fixP->fx_line,
16215 _("offset out of range"));
16216 break;
16217 }
16218 newval &= ~0xff;
16219 }
16220 else if ((newval & 0x000f0000) == 0x000f0000)
16221 {
16222 /* PC-relative, 12-bit offset. */
16223 if (value >= 0)
16224 newval |= (1 << 23);
16225 else
16226 value = -value;
16227 if (value > 0xfff)
16228 {
16229 as_bad_where (fixP->fx_file, fixP->fx_line,
16230 _("offset out of range"));
16231 break;
16232 }
16233 newval &= ~0xfff;
16234 }
16235 else if ((newval & 0x00000100) == 0x00000100)
16236 {
16237 /* Writeback: 8-bit, +/- offset. */
16238 if (value >= 0)
16239 newval |= (1 << 9);
16240 else
16241 value = -value;
16242 if (value > 0xff)
16243 {
16244 as_bad_where (fixP->fx_file, fixP->fx_line,
16245 _("offset out of range"));
16246 break;
16247 }
16248 newval &= ~0xff;
16249 }
16250 else if ((newval & 0x00000f00) == 0x00000e00)
16251 {
16252 /* T-instruction: positive 8-bit offset. */
16253 if (value < 0 || value > 0xff)
16254 {
16255 as_bad_where (fixP->fx_file, fixP->fx_line,
16256 _("offset out of range"));
16257 break;
16258 }
16259 newval &= ~0xff;
16260 newval |= value;
16261 }
16262 else
16263 {
16264 /* Positive 12-bit or negative 8-bit offset. */
16265 int limit;
16266 if (value >= 0)
16267 {
16268 newval |= (1 << 23);
16269 limit = 0xfff;
16270 }
16271 else
16272 {
16273 value = -value;
16274 limit = 0xff;
16275 }
16276 if (value > limit)
16277 {
16278 as_bad_where (fixP->fx_file, fixP->fx_line,
16279 _("offset out of range"));
16280 break;
16281 }
16282 newval &= ~limit;
16283 }
16284
16285 newval |= value;
16286 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16287 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16288 break;
16289
16290 case BFD_RELOC_ARM_SHIFT_IMM:
16291 newval = md_chars_to_number (buf, INSN_SIZE);
16292 if (((unsigned long) value) > 32
16293 || (value == 32
16294 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16295 {
16296 as_bad_where (fixP->fx_file, fixP->fx_line,
16297 _("shift expression is too large"));
16298 break;
16299 }
16300
16301 if (value == 0)
16302 /* Shifts of zero must be done as lsl. */
16303 newval &= ~0x60;
16304 else if (value == 32)
16305 value = 0;
16306 newval &= 0xfffff07f;
16307 newval |= (value & 0x1f) << 7;
16308 md_number_to_chars (buf, newval, INSN_SIZE);
16309 break;
16310
16311 case BFD_RELOC_ARM_T32_IMMEDIATE:
16312 case BFD_RELOC_ARM_T32_IMM12:
16313 case BFD_RELOC_ARM_T32_ADD_PC12:
16314 /* We claim that this fixup has been processed here,
16315 even if in fact we generate an error because we do
16316 not have a reloc for it, so tc_gen_reloc will reject it. */
16317 fixP->fx_done = 1;
16318
16319 if (fixP->fx_addsy
16320 && ! S_IS_DEFINED (fixP->fx_addsy))
16321 {
16322 as_bad_where (fixP->fx_file, fixP->fx_line,
16323 _("undefined symbol %s used as an immediate value"),
16324 S_GET_NAME (fixP->fx_addsy));
16325 break;
16326 }
16327
16328 newval = md_chars_to_number (buf, THUMB_SIZE);
16329 newval <<= 16;
16330 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16331
16332 /* FUTURE: Implement analogue of negate_data_op for T32. */
16333 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16334 {
16335 newimm = encode_thumb32_immediate (value);
16336 if (newimm == (unsigned int) FAIL)
16337 newimm = thumb32_negate_data_op (&newval, value);
16338 }
16339 else
16340 {
16341 /* 12 bit immediate for addw/subw. */
16342 if (value < 0)
16343 {
16344 value = -value;
16345 newval ^= 0x00a00000;
16346 }
16347 if (value > 0xfff)
16348 newimm = (unsigned int) FAIL;
16349 else
16350 newimm = value;
16351 }
16352
16353 if (newimm == (unsigned int)FAIL)
16354 {
16355 as_bad_where (fixP->fx_file, fixP->fx_line,
16356 _("invalid constant (%lx) after fixup"),
16357 (unsigned long) value);
16358 break;
16359 }
16360
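/* Illustrative example (value assumed): a 12-bit encoded immediate of
   0x4ff is scattered by the three statements below as i (bit 11) into
   bit 26, imm3 (bits 8..10, here 4) into bits 12..14 and imm8 (0xff)
   into bits 0..7 of the combined word, i.e. the Thumb-2 i:imm3:imm8
   layout. */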
16361 newval |= (newimm & 0x800) << 15;
16362 newval |= (newimm & 0x700) << 4;
16363 newval |= (newimm & 0x0ff);
16364
16365 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16366 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16367 break;
16368
16369 case BFD_RELOC_ARM_SMC:
16370 if (((unsigned long) value) > 0xffff)
16371 as_bad_where (fixP->fx_file, fixP->fx_line,
16372 _("invalid smc expression"));
16373 newval = md_chars_to_number (buf, INSN_SIZE);
16374 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16375 md_number_to_chars (buf, newval, INSN_SIZE);
16376 break;
16377
16378 case BFD_RELOC_ARM_SWI:
16379 if (fixP->tc_fix_data != 0)
16380 {
16381 if (((unsigned long) value) > 0xff)
16382 as_bad_where (fixP->fx_file, fixP->fx_line,
16383 _("invalid swi expression"));
16384 newval = md_chars_to_number (buf, THUMB_SIZE);
16385 newval |= value;
16386 md_number_to_chars (buf, newval, THUMB_SIZE);
16387 }
16388 else
16389 {
16390 if (((unsigned long) value) > 0x00ffffff)
16391 as_bad_where (fixP->fx_file, fixP->fx_line,
16392 _("invalid swi expression"));
16393 newval = md_chars_to_number (buf, INSN_SIZE);
16394 newval |= value;
16395 md_number_to_chars (buf, newval, INSN_SIZE);
16396 }
16397 break;
16398
16399 case BFD_RELOC_ARM_MULTI:
16400 if (((unsigned long) value) > 0xffff)
16401 as_bad_where (fixP->fx_file, fixP->fx_line,
16402 _("invalid expression in load/store multiple"));
16403 newval = value | md_chars_to_number (buf, INSN_SIZE);
16404 md_number_to_chars (buf, newval, INSN_SIZE);
16405 break;
16406
16407 #ifdef OBJ_ELF
16408 case BFD_RELOC_ARM_PCREL_CALL:
16409 newval = md_chars_to_number (buf, INSN_SIZE);
16410 if ((newval & 0xf0000000) == 0xf0000000)
16411 temp = 1;
16412 else
16413 temp = 3;
16414 goto arm_branch_common;
16415
16416 case BFD_RELOC_ARM_PCREL_JUMP:
16417 case BFD_RELOC_ARM_PLT32:
16418 #endif
16419 case BFD_RELOC_ARM_PCREL_BRANCH:
16420 temp = 3;
16421 goto arm_branch_common;
16422
16423 case BFD_RELOC_ARM_PCREL_BLX:
16424 temp = 1;
16425 arm_branch_common:
16426 /* We are going to store value (shifted right by two) in the
16427 instruction, in a 24 bit, signed field. Bits 25 through 31 must be
16428 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
16429 must also be clear. */
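/* Worked example (offsets assumed): a branch whose destination is 8
   bytes beyond the PC-relative base gives value == 8, so 8 >> 2 == 2
   lands in the low 24 bits below; for BLX, bit 1 of value supplies the
   H bit (bit 24). */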
16430 if (value & temp)
16431 as_bad_where (fixP->fx_file, fixP->fx_line,
16432 _("misaligned branch destination"));
16433 if ((value & (offsetT)0xfe000000) != (offsetT)0
16434 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16435 as_bad_where (fixP->fx_file, fixP->fx_line,
16436 _("branch out of range"));
16437
16438 if (fixP->fx_done || !seg->use_rela_p)
16439 {
16440 newval = md_chars_to_number (buf, INSN_SIZE);
16441 newval |= (value >> 2) & 0x00ffffff;
16442 /* Set the H bit on BLX instructions. */
16443 if (temp == 1)
16444 {
16445 if (value & 2)
16446 newval |= 0x01000000;
16447 else
16448 newval &= ~0x01000000;
16449 }
16450 md_number_to_chars (buf, newval, INSN_SIZE);
16451 }
16452 break;
16453
16454 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
16455 /* CZB can only branch forward. */
16456 if (value & ~0x7e)
16457 as_bad_where (fixP->fx_file, fixP->fx_line,
16458 _("branch out of range"));
16459
16460 if (fixP->fx_done || !seg->use_rela_p)
16461 {
16462 newval = md_chars_to_number (buf, THUMB_SIZE);
16463 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16464 md_number_to_chars (buf, newval, THUMB_SIZE);
16465 }
16466 break;
16467
16468 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16469 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16470 as_bad_where (fixP->fx_file, fixP->fx_line,
16471 _("branch out of range"));
16472
16473 if (fixP->fx_done || !seg->use_rela_p)
16474 {
16475 newval = md_chars_to_number (buf, THUMB_SIZE);
16476 newval |= (value & 0x1ff) >> 1;
16477 md_number_to_chars (buf, newval, THUMB_SIZE);
16478 }
16479 break;
16480
16481 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16482 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16483 as_bad_where (fixP->fx_file, fixP->fx_line,
16484 _("branch out of range"));
16485
16486 if (fixP->fx_done || !seg->use_rela_p)
16487 {
16488 newval = md_chars_to_number (buf, THUMB_SIZE);
16489 newval |= (value & 0xfff) >> 1;
16490 md_number_to_chars (buf, newval, THUMB_SIZE);
16491 }
16492 break;
16493
16494 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16495 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16496 as_bad_where (fixP->fx_file, fixP->fx_line,
16497 _("conditional branch out of range"));
16498
16499 if (fixP->fx_done || !seg->use_rela_p)
16500 {
16501 offsetT newval2;
16502 addressT S, J1, J2, lo, hi;
16503
16504 S = (value & 0x00100000) >> 20;
16505 J2 = (value & 0x00080000) >> 19;
16506 J1 = (value & 0x00040000) >> 18;
16507 hi = (value & 0x0003f000) >> 12;
16508 lo = (value & 0x00000ffe) >> 1;
16509
16510 newval = md_chars_to_number (buf, THUMB_SIZE);
16511 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16512 newval |= (S << 10) | hi;
16513 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16514 md_number_to_chars (buf, newval, THUMB_SIZE);
16515 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16516 }
16517 break;
16518
16519 case BFD_RELOC_THUMB_PCREL_BLX:
16520 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16521 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16522 as_bad_where (fixP->fx_file, fixP->fx_line,
16523 _("branch out of range"));
16524
16525 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16526 /* For a BLX instruction, make sure that the relocation is rounded up
16527 to a word boundary. This follows the semantics of the instruction
16528 which specifies that bit 1 of the target address will come from bit
16529 1 of the base address. */
16530 value = (value + 1) & ~ 1;
16531
16532 if (fixP->fx_done || !seg->use_rela_p)
16533 {
16534 offsetT newval2;
16535
16536 newval = md_chars_to_number (buf, THUMB_SIZE);
16537 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16538 newval |= (value & 0x7fffff) >> 12;
16539 newval2 |= (value & 0xfff) >> 1;
16540 md_number_to_chars (buf, newval, THUMB_SIZE);
16541 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16542 }
16543 break;
16544
16545 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16546 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16547 as_bad_where (fixP->fx_file, fixP->fx_line,
16548 _("branch out of range"));
16549
16550 if (fixP->fx_done || !seg->use_rela_p)
16551 {
16552 offsetT newval2;
16553 addressT S, I1, I2, lo, hi;
16554
16555 S = (value & 0x01000000) >> 24;
16556 I1 = (value & 0x00800000) >> 23;
16557 I2 = (value & 0x00400000) >> 22;
16558 hi = (value & 0x003ff000) >> 12;
16559 lo = (value & 0x00000ffe) >> 1;
16560
16561 I1 = !(I1 ^ S);
16562 I2 = !(I2 ^ S);
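/* Illustrative: for a forward branch with value == 0x00400000 we get
   S == 0 and bits 23 and 22 of value are 0 and 1, so the stored
   J1 == !(0 ^ 0) == 1 and J2 == !(1 ^ 0) == 0. */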
16563
16564 newval = md_chars_to_number (buf, THUMB_SIZE);
16565 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16566 newval |= (S << 10) | hi;
16567 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16568 md_number_to_chars (buf, newval, THUMB_SIZE);
16569 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16570 }
16571 break;
16572
16573 case BFD_RELOC_8:
16574 if (fixP->fx_done || !seg->use_rela_p)
16575 md_number_to_chars (buf, value, 1);
16576 break;
16577
16578 case BFD_RELOC_16:
16579 if (fixP->fx_done || !seg->use_rela_p)
16580 md_number_to_chars (buf, value, 2);
16581 break;
16582
16583 #ifdef OBJ_ELF
16584 case BFD_RELOC_ARM_TLS_GD32:
16585 case BFD_RELOC_ARM_TLS_LE32:
16586 case BFD_RELOC_ARM_TLS_IE32:
16587 case BFD_RELOC_ARM_TLS_LDM32:
16588 case BFD_RELOC_ARM_TLS_LDO32:
16589 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16590 /* fall through */
16591
16592 case BFD_RELOC_ARM_GOT32:
16593 case BFD_RELOC_ARM_GOTOFF:
16594 case BFD_RELOC_ARM_TARGET2:
16595 if (fixP->fx_done || !seg->use_rela_p)
16596 md_number_to_chars (buf, 0, 4);
16597 break;
16598 #endif
16599
16600 case BFD_RELOC_RVA:
16601 case BFD_RELOC_32:
16602 case BFD_RELOC_ARM_TARGET1:
16603 case BFD_RELOC_ARM_ROSEGREL32:
16604 case BFD_RELOC_ARM_SBREL32:
16605 case BFD_RELOC_32_PCREL:
16606 if (fixP->fx_done || !seg->use_rela_p)
16607 #ifdef TE_WINCE
16608 /* For WinCE we only do this for pcrel fixups. */
16609 if (fixP->fx_done || fixP->fx_pcrel)
16610 #endif
16611 md_number_to_chars (buf, value, 4);
16612 break;
16613
16614 #ifdef OBJ_ELF
16615 case BFD_RELOC_ARM_PREL31:
16616 if (fixP->fx_done || !seg->use_rela_p)
16617 {
16618 newval = md_chars_to_number (buf, 4) & 0x80000000;
16619 if ((value ^ (value >> 1)) & 0x40000000)
16620 {
16621 as_bad_where (fixP->fx_file, fixP->fx_line,
16622 _("rel31 relocation overflow"));
16623 }
16624 newval |= value & 0x7fffffff;
16625 md_number_to_chars (buf, newval, 4);
16626 }
16627 break;
16628 #endif
16629
16630 case BFD_RELOC_ARM_CP_OFF_IMM:
16631 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16632 if (value < -1023 || value > 1023 || (value & 3))
16633 as_bad_where (fixP->fx_file, fixP->fx_line,
16634 _("co-processor offset out of range"));
16635 cp_off_common:
16636 sign = value >= 0;
16637 if (value < 0)
16638 value = -value;
16639 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16640 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16641 newval = md_chars_to_number (buf, INSN_SIZE);
16642 else
16643 newval = get_thumb32_insn (buf);
16644 newval &= 0xff7fff00;
16645 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
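/* Illustrative: an offset of -8 reaches this point as sign == 0 and
   value == 8, so 8 >> 2 == 2 has just been stored with the U
   (INDEX_UP) bit clear, i.e. a downward offset of two words. */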
16646 if (value == 0)
16647 newval &= ~WRITE_BACK;
16648 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16649 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16650 md_number_to_chars (buf, newval, INSN_SIZE);
16651 else
16652 put_thumb32_insn (buf, newval);
16653 break;
16654
16655 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16656 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16657 if (value < -255 || value > 255)
16658 as_bad_where (fixP->fx_file, fixP->fx_line,
16659 _("co-processor offset out of range"));
16660 value *= 4;
16661 goto cp_off_common;
16662
16663 case BFD_RELOC_ARM_THUMB_OFFSET:
16664 newval = md_chars_to_number (buf, THUMB_SIZE);
16665 /* Exactly what ranges, and where the offset is inserted, depends
16666 on the type of instruction; we can establish this from the
16667 top 4 bits. */
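/* For instance (illustrative): a PC-relative load (top nybble 4) scales
   the byte offset by four, so an offset of 0x40 is stored as 0x10 in
   the low byte of the opcode by the cases below. */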
16668 switch (newval >> 12)
16669 {
16670 case 4: /* PC load. */
16671 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16672 forced to zero for these loads; md_pcrel_from has already
16673 compensated for this. */
16674 if (value & 3)
16675 as_bad_where (fixP->fx_file, fixP->fx_line,
16676 _("invalid offset, target not word aligned (0x%08lX)"),
16677 (((unsigned long) fixP->fx_frag->fr_address
16678 + (unsigned long) fixP->fx_where) & ~3)
16679 + (unsigned long) value);
16680
16681 if (value & ~0x3fc)
16682 as_bad_where (fixP->fx_file, fixP->fx_line,
16683 _("invalid offset, value too big (0x%08lX)"),
16684 (long) value);
16685
16686 newval |= value >> 2;
16687 break;
16688
16689 case 9: /* SP load/store. */
16690 if (value & ~0x3fc)
16691 as_bad_where (fixP->fx_file, fixP->fx_line,
16692 _("invalid offset, value too big (0x%08lX)"),
16693 (long) value);
16694 newval |= value >> 2;
16695 break;
16696
16697 case 6: /* Word load/store. */
16698 if (value & ~0x7c)
16699 as_bad_where (fixP->fx_file, fixP->fx_line,
16700 _("invalid offset, value too big (0x%08lX)"),
16701 (long) value);
16702 newval |= value << 4; /* 6 - 2. */
16703 break;
16704
16705 case 7: /* Byte load/store. */
16706 if (value & ~0x1f)
16707 as_bad_where (fixP->fx_file, fixP->fx_line,
16708 _("invalid offset, value too big (0x%08lX)"),
16709 (long) value);
16710 newval |= value << 6;
16711 break;
16712
16713 case 8: /* Halfword load/store. */
16714 if (value & ~0x3e)
16715 as_bad_where (fixP->fx_file, fixP->fx_line,
16716 _("invalid offset, value too big (0x%08lX)"),
16717 (long) value);
16718 newval |= value << 5; /* 6 - 1. */
16719 break;
16720
16721 default:
16722 as_bad_where (fixP->fx_file, fixP->fx_line,
16723 _("unable to process relocation for thumb opcode: %lx"),
16724 (unsigned long) newval);
16725 break;
16726 }
16727 md_number_to_chars (buf, newval, THUMB_SIZE);
16728 break;
16729
16730 case BFD_RELOC_ARM_THUMB_ADD:
16731 /* This is a complicated relocation, since we use it for all of
16732 the following immediate relocations:
16733
16734 3bit ADD/SUB
16735 8bit ADD/SUB
16736 9bit ADD/SUB SP word-aligned
16737 10bit ADD PC/SP word-aligned
16738
16739 The type of instruction being processed is encoded in the
16740 instruction field:
16741
16742 0x8000 SUB
16743 0x00F0 Rd
16744 0x000F Rs
16745 */
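/* Worked example (operands assumed): for "add r1, pc, #8" the decode
   below gives rd == 1, rs == REG_PC and subtract == 0, so the
   rs == REG_PC branch rebuilds the opcode as T_OPCODE_ADD_PC with rd
   in bits 8..10 and value >> 2 == 2 in the low byte. */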
16746 newval = md_chars_to_number (buf, THUMB_SIZE);
16747 {
16748 int rd = (newval >> 4) & 0xf;
16749 int rs = newval & 0xf;
16750 int subtract = !!(newval & 0x8000);
16751
16752 /* Check for HI regs, only very restricted cases allowed:
16753 Adjusting SP, and using PC or SP to get an address. */
16754 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16755 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16756 as_bad_where (fixP->fx_file, fixP->fx_line,
16757 _("invalid Hi register with immediate"));
16758
16759 /* If value is negative, choose the opposite instruction. */
16760 if (value < 0)
16761 {
16762 value = -value;
16763 subtract = !subtract;
16764 if (value < 0)
16765 as_bad_where (fixP->fx_file, fixP->fx_line,
16766 _("immediate value out of range"));
16767 }
16768
16769 if (rd == REG_SP)
16770 {
16771 if (value & ~0x1fc)
16772 as_bad_where (fixP->fx_file, fixP->fx_line,
16773 _("invalid immediate for stack address calculation"));
16774 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16775 newval |= value >> 2;
16776 }
16777 else if (rs == REG_PC || rs == REG_SP)
16778 {
16779 if (subtract || value & ~0x3fc)
16780 as_bad_where (fixP->fx_file, fixP->fx_line,
16781 _("invalid immediate for address calculation (value = 0x%08lX)"),
16782 (unsigned long) value);
16783 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16784 newval |= rd << 8;
16785 newval |= value >> 2;
16786 }
16787 else if (rs == rd)
16788 {
16789 if (value & ~0xff)
16790 as_bad_where (fixP->fx_file, fixP->fx_line,
16791 _("immediate value out of range"));
16792 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16793 newval |= (rd << 8) | value;
16794 }
16795 else
16796 {
16797 if (value & ~0x7)
16798 as_bad_where (fixP->fx_file, fixP->fx_line,
16799 _("immediate value out of range"));
16800 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16801 newval |= rd | (rs << 3) | (value << 6);
16802 }
16803 }
16804 md_number_to_chars (buf, newval, THUMB_SIZE);
16805 break;
16806
16807 case BFD_RELOC_ARM_THUMB_IMM:
16808 newval = md_chars_to_number (buf, THUMB_SIZE);
16809 if (value < 0 || value > 255)
16810 as_bad_where (fixP->fx_file, fixP->fx_line,
16811 _("invalid immediate: %ld is too large"),
16812 (long) value);
16813 newval |= value;
16814 md_number_to_chars (buf, newval, THUMB_SIZE);
16815 break;
16816
16817 case BFD_RELOC_ARM_THUMB_SHIFT:
16818 /* 5bit shift value (0..32). LSL cannot take 32. */
16819 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16820 temp = newval & 0xf800;
16821 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16822 as_bad_where (fixP->fx_file, fixP->fx_line,
16823 _("invalid shift value: %ld"), (long) value);
16824 /* Shifts of zero must be encoded as LSL. */
16825 if (value == 0)
16826 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16827 /* Shifts of 32 are encoded as zero. */
16828 else if (value == 32)
16829 value = 0;
16830 newval |= value << 6;
16831 md_number_to_chars (buf, newval, THUMB_SIZE);
16832 break;
16833
16834 case BFD_RELOC_VTABLE_INHERIT:
16835 case BFD_RELOC_VTABLE_ENTRY:
16836 fixP->fx_done = 0;
16837 return;
16838
16839 case BFD_RELOC_ARM_MOVW:
16840 case BFD_RELOC_ARM_MOVT:
16841 case BFD_RELOC_ARM_THUMB_MOVW:
16842 case BFD_RELOC_ARM_THUMB_MOVT:
16843 if (fixP->fx_done || !seg->use_rela_p)
16844 {
16845 /* REL format relocations are limited to a 16-bit addend. */
16846 if (!fixP->fx_done)
16847 {
16848 if (value < -0x1000 || value > 0xffff)
16849 as_bad_where (fixP->fx_file, fixP->fx_line,
16850 _("offset too big"));
16851 }
16852 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
16853 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16854 {
16855 value >>= 16;
16856 }
16857
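/* Illustrative example (value assumed): with value == 0x1234 the ARM
   encoding below places imm4 == 0x1 in bits 16..19 and imm12 == 0x234
   in bits 0..11; the Thumb encoding scatters the same halfword as
   imm4:i:imm3:imm8 into bits 16..19, 26, 12..14 and 0..7. */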
16858 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
16859 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16860 {
16861 newval = get_thumb32_insn (buf);
16862 newval &= 0xfbf08f00;
16863 newval |= (value & 0xf000) << 4;
16864 newval |= (value & 0x0800) << 15;
16865 newval |= (value & 0x0700) << 4;
16866 newval |= (value & 0x00ff);
16867 put_thumb32_insn (buf, newval);
16868 }
16869 else
16870 {
16871 newval = md_chars_to_number (buf, 4);
16872 newval &= 0xfff0f000;
16873 newval |= value & 0x0fff;
16874 newval |= (value & 0xf000) << 4;
16875 md_number_to_chars (buf, newval, 4);
16876 }
16877 }
16878 return;
16879
16880 case BFD_RELOC_UNUSED:
16881 default:
16882 as_bad_where (fixP->fx_file, fixP->fx_line,
16883 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16884 }
16885 }
16886
16887 /* Translate internal representation of relocation info to BFD target
16888 format. */
16889
16890 arelent *
16891 tc_gen_reloc (asection *section, fixS *fixp)
16892 {
16893 arelent * reloc;
16894 bfd_reloc_code_real_type code;
16895
16896 reloc = xmalloc (sizeof (arelent));
16897
16898 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16899 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16900 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16901
16902 if (fixp->fx_pcrel)
16903 {
16904 if (section->use_rela_p)
16905 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16906 else
16907 fixp->fx_offset = reloc->address;
16908 }
16909 reloc->addend = fixp->fx_offset;
16910
16911 switch (fixp->fx_r_type)
16912 {
16913 case BFD_RELOC_8:
16914 if (fixp->fx_pcrel)
16915 {
16916 code = BFD_RELOC_8_PCREL;
16917 break;
16918 }
16919
16920 case BFD_RELOC_16:
16921 if (fixp->fx_pcrel)
16922 {
16923 code = BFD_RELOC_16_PCREL;
16924 break;
16925 }
16926
16927 case BFD_RELOC_32:
16928 if (fixp->fx_pcrel)
16929 {
16930 code = BFD_RELOC_32_PCREL;
16931 break;
16932 }
16933
16934 case BFD_RELOC_ARM_MOVW:
16935 if (fixp->fx_pcrel)
16936 {
16937 code = BFD_RELOC_ARM_MOVW_PCREL;
16938 break;
16939 }
16940
16941 case BFD_RELOC_ARM_MOVT:
16942 if (fixp->fx_pcrel)
16943 {
16944 code = BFD_RELOC_ARM_MOVT_PCREL;
16945 break;
16946 }
16947
16948 case BFD_RELOC_ARM_THUMB_MOVW:
16949 if (fixp->fx_pcrel)
16950 {
16951 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
16952 break;
16953 }
16954
16955 case BFD_RELOC_ARM_THUMB_MOVT:
16956 if (fixp->fx_pcrel)
16957 {
16958 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
16959 break;
16960 }
16961
16962 case BFD_RELOC_NONE:
16963 case BFD_RELOC_ARM_PCREL_BRANCH:
16964 case BFD_RELOC_ARM_PCREL_BLX:
16965 case BFD_RELOC_RVA:
16966 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16967 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16968 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16969 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16970 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16971 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16972 case BFD_RELOC_THUMB_PCREL_BLX:
16973 case BFD_RELOC_VTABLE_ENTRY:
16974 case BFD_RELOC_VTABLE_INHERIT:
16975 code = fixp->fx_r_type;
16976 break;
16977
16978 case BFD_RELOC_ARM_LITERAL:
16979 case BFD_RELOC_ARM_HWLITERAL:
16980 /* If this is called then a literal has
16981 been referenced across a section boundary. */
16982 as_bad_where (fixp->fx_file, fixp->fx_line,
16983 _("literal referenced across section boundary"));
16984 return NULL;
16985
16986 #ifdef OBJ_ELF
16987 case BFD_RELOC_ARM_GOT32:
16988 case BFD_RELOC_ARM_GOTOFF:
16989 case BFD_RELOC_ARM_PLT32:
16990 case BFD_RELOC_ARM_TARGET1:
16991 case BFD_RELOC_ARM_ROSEGREL32:
16992 case BFD_RELOC_ARM_SBREL32:
16993 case BFD_RELOC_ARM_PREL31:
16994 case BFD_RELOC_ARM_TARGET2:
16995 case BFD_RELOC_ARM_TLS_LE32:
16996 case BFD_RELOC_ARM_TLS_LDO32:
16997 case BFD_RELOC_ARM_PCREL_CALL:
16998 case BFD_RELOC_ARM_PCREL_JUMP:
16999 code = fixp->fx_r_type;
17000 break;
17001
17002 case BFD_RELOC_ARM_TLS_GD32:
17003 case BFD_RELOC_ARM_TLS_IE32:
17004 case BFD_RELOC_ARM_TLS_LDM32:
17005 /* BFD will include the symbol's address in the addend.
17006 But we don't want that, so subtract it out again here. */
17007 if (!S_IS_COMMON (fixp->fx_addsy))
17008 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
17009 code = fixp->fx_r_type;
17010 break;
17011 #endif
17012
17013 case BFD_RELOC_ARM_IMMEDIATE:
17014 as_bad_where (fixp->fx_file, fixp->fx_line,
17015 _("internal relocation (type: IMMEDIATE) not fixed up"));
17016 return NULL;
17017
17018 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17019 as_bad_where (fixp->fx_file, fixp->fx_line,
17020 _("ADRL used for a symbol not defined in the same file"));
17021 return NULL;
17022
17023 case BFD_RELOC_ARM_OFFSET_IMM:
17024 if (section->use_rela_p)
17025 {
17026 code = fixp->fx_r_type;
17027 break;
17028 }
17029
17030 if (fixp->fx_addsy != NULL
17031 && !S_IS_DEFINED (fixp->fx_addsy)
17032 && S_IS_LOCAL (fixp->fx_addsy))
17033 {
17034 as_bad_where (fixp->fx_file, fixp->fx_line,
17035 _("undefined local label `%s'"),
17036 S_GET_NAME (fixp->fx_addsy));
17037 return NULL;
17038 }
17039
17040 as_bad_where (fixp->fx_file, fixp->fx_line,
17041 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
17042 return NULL;
17043
17044 default:
17045 {
17046 char * type;
17047
17048 switch (fixp->fx_r_type)
17049 {
17050 case BFD_RELOC_NONE: type = "NONE"; break;
17051 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
17052 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
17053 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
17054 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
17055 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
17056 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
17057 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
17058 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
17059 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
17060 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
17061 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
17062 default: type = _("<unknown>"); break;
17063 }
17064 as_bad_where (fixp->fx_file, fixp->fx_line,
17065 _("cannot represent %s relocation in this object file format"),
17066 type);
17067 return NULL;
17068 }
17069 }
17070
17071 #ifdef OBJ_ELF
17072 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
17073 && GOT_symbol
17074 && fixp->fx_addsy == GOT_symbol)
17075 {
17076 code = BFD_RELOC_ARM_GOTPC;
17077 reloc->addend = fixp->fx_offset = reloc->address;
17078 }
17079 #endif
17080
17081 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
17082
17083 if (reloc->howto == NULL)
17084 {
17085 as_bad_where (fixp->fx_file, fixp->fx_line,
17086 _("cannot represent %s relocation in this object file format"),
17087 bfd_get_reloc_code_name (code));
17088 return NULL;
17089 }
17090
17091 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
17092 vtable entry to be used in the relocation's section offset. */
17093 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17094 reloc->address = fixp->fx_offset;
17095
17096 return reloc;
17097 }
17098
17099 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
17100
17101 void
17102 cons_fix_new_arm (fragS * frag,
17103 int where,
17104 int size,
17105 expressionS * exp)
17106 {
17107 bfd_reloc_code_real_type type;
17108 int pcrel = 0;
17109
17110 /* Pick a reloc.
17111 FIXME: @@ Should look at CPU word size. */
17112 switch (size)
17113 {
17114 case 1:
17115 type = BFD_RELOC_8;
17116 break;
17117 case 2:
17118 type = BFD_RELOC_16;
17119 break;
17120 case 4:
17121 default:
17122 type = BFD_RELOC_32;
17123 break;
17124 case 8:
17125 type = BFD_RELOC_64;
17126 break;
17127 }
17128
17129 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
17130 }
17131
17132 #if defined OBJ_COFF || defined OBJ_ELF
17133 void
17134 arm_validate_fix (fixS * fixP)
17135 {
17136 /* If the destination of the branch is a defined symbol which does not have
17137 the THUMB_FUNC attribute, then we must be calling a function which has
17138 the (interfacearm) attribute. We look for the Thumb entry point to that
17139 function and change the branch to refer to that function instead. */
17140 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
17141 && fixP->fx_addsy != NULL
17142 && S_IS_DEFINED (fixP->fx_addsy)
17143 && ! THUMB_IS_FUNC (fixP->fx_addsy))
17144 {
17145 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
17146 }
17147 }
17148 #endif
17149
17150 int
17151 arm_force_relocation (struct fix * fixp)
17152 {
17153 #if defined (OBJ_COFF) && defined (TE_PE)
17154 if (fixp->fx_r_type == BFD_RELOC_RVA)
17155 return 1;
17156 #endif
17157
17158 /* Resolve these relocations even if the symbol is extern or weak. */
17159 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
17160 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
17161 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
17162 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17163 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
17164 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
17165 return 0;
17166
17167 return generic_force_reloc (fixp);
17168 }
17169
17170 #ifdef OBJ_COFF
17171 bfd_boolean
17172 arm_fix_adjustable (fixS * fixP)
17173 {
17174 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
17175 local labels from being added to the output symbol table when they
17176 are used with the ADRL pseudo op. The ADRL relocation should always
17177 be resolved before the binary is emitted, so it is safe to say that
17178 it is adjustable. */
17179 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
17180 return 1;
17181
17182 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
17183 to be cloned, and without this test relocs would still be generated
17184 against the original, pre-cloned symbol. Such symbols would not appear
17185 in the symbol table however, and so a valid reloc could not be
17186 generated. So check to see if the fixup is against a symbol which has
17187 been removed from the symbol chain, and if it is, then allow it to be
17188 adjusted into a reloc against a section symbol. */
17189 if (fixP->fx_addsy != NULL
17190 && ! S_IS_LOCAL (fixP->fx_addsy)
17191 && symbol_next (fixP->fx_addsy) == NULL
17192 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
17193 return 1;
17194
17195 return 0;
17196 }
17197 #endif
17198
17199 #ifdef OBJ_ELF
17200 /* Relocations against function names must be left unadjusted,
17201 so that the linker can use this information to generate interworking
17202 stubs. The MIPS version of this function
17203 also prevents relocations that are mips-16 specific, but I do not
17204 know why it does this.
17205
17206 FIXME:
17207 There is one other problem that ought to be addressed here, but
17208 which currently is not: Taking the address of a label (rather
17209 than a function) and then later jumping to that address. Such
17210 addresses also ought to have their bottom bit set (assuming that
17211 they reside in Thumb code), but at the moment they will not. */
17212
17213 bfd_boolean
17214 arm_fix_adjustable (fixS * fixP)
17215 {
17216 if (fixP->fx_addsy == NULL)
17217 return 1;
17218
17219 /* Preserve relocations against symbols with function type. */
17220 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
17221 return 0;
17222
17223 if (THUMB_IS_FUNC (fixP->fx_addsy)
17224 && fixP->fx_subsy == NULL)
17225 return 0;
17226
17227 /* We need the symbol name for the VTABLE entries. */
17228 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
17229 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17230 return 0;
17231
17232 /* Don't allow symbols to be discarded on GOT related relocs. */
17233 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
17234 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
17235 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
17236 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
17237 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
17238 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
17239 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
17240 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
17241 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
17242 return 0;
17243
17244 return 1;
17245 }
17246
17247 const char *
17248 elf32_arm_target_format (void)
17249 {
17250 #ifdef TE_SYMBIAN
17251 return (target_big_endian
17252 ? "elf32-bigarm-symbian"
17253 : "elf32-littlearm-symbian");
17254 #elif defined (TE_VXWORKS)
17255 return (target_big_endian
17256 ? "elf32-bigarm-vxworks"
17257 : "elf32-littlearm-vxworks");
17258 #else
17259 if (target_big_endian)
17260 return "elf32-bigarm";
17261 else
17262 return "elf32-littlearm";
17263 #endif
17264 }
17265
17266 void
17267 armelf_frob_symbol (symbolS * symp,
17268 int * puntp)
17269 {
17270 elf_frob_symbol (symp, puntp);
17271 }
17272 #endif
17273
17274 /* MD interface: Finalization. */
17275
17276 /* This is a convenient place to dump the literal pools, although it was
17277 probably not intended for this kind of use. We need to do so before
17278 references are made to a null symbol pointer. */
17279
17280 void
17281 arm_cleanup (void)
17282 {
17283 literal_pool * pool;
17284
17285 for (pool = list_of_pools; pool; pool = pool->next)
17286 {
17287 /* Put it at the end of the relevant section. */
17288 subseg_set (pool->section, pool->sub_section);
17289 #ifdef OBJ_ELF
17290 arm_elf_change_section ();
17291 #endif
17292 s_ltorg (0);
17293 }
17294 }
17295
17296 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17297 ARM ones. */
17298
17299 void
17300 arm_adjust_symtab (void)
17301 {
17302 #ifdef OBJ_COFF
17303 symbolS * sym;
17304
17305 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17306 {
17307 if (ARM_IS_THUMB (sym))
17308 {
17309 if (THUMB_IS_FUNC (sym))
17310 {
17311 /* Mark the symbol as a Thumb function. */
17312 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17313 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17314 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17315
17316 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17317 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17318 else
17319 as_bad (_("%s: unexpected function type: %d"),
17320 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17321 }
17322 else switch (S_GET_STORAGE_CLASS (sym))
17323 {
17324 case C_EXT:
17325 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17326 break;
17327 case C_STAT:
17328 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17329 break;
17330 case C_LABEL:
17331 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17332 break;
17333 default:
17334 /* Do nothing. */
17335 break;
17336 }
17337 }
17338
17339 if (ARM_IS_INTERWORK (sym))
17340 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17341 }
17342 #endif
17343 #ifdef OBJ_ELF
17344 symbolS * sym;
17345 char bind;
17346
17347 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17348 {
17349 if (ARM_IS_THUMB (sym))
17350 {
17351 elf_symbol_type * elf_sym;
17352
17353 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17354 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17355
17356 if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
17357 {
17358 /* If it's a .thumb_func, declare it as so,
17359 otherwise tag label as .code 16. */
17360 if (THUMB_IS_FUNC (sym))
17361 elf_sym->internal_elf_sym.st_info =
17362 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17363 else
17364 elf_sym->internal_elf_sym.st_info =
17365 ELF_ST_INFO (bind, STT_ARM_16BIT);
17366 }
17367 }
17368 }
17369 #endif
17370 }
17371
17372 /* MD interface: Initialization. */
17373
17374 static void
17375 set_constant_flonums (void)
17376 {
17377 int i;
17378
17379 for (i = 0; i < NUM_FLOAT_VALS; i++)
17380 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17381 abort ();
17382 }
17383
17384 void
17385 md_begin (void)
17386 {
17387 unsigned mach;
17388 unsigned int i;
17389
17390 if ( (arm_ops_hsh = hash_new ()) == NULL
17391 || (arm_cond_hsh = hash_new ()) == NULL
17392 || (arm_shift_hsh = hash_new ()) == NULL
17393 || (arm_psr_hsh = hash_new ()) == NULL
17394 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17395 || (arm_reg_hsh = hash_new ()) == NULL
17396 || (arm_reloc_hsh = hash_new ()) == NULL
17397 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17398 as_fatal (_("virtual memory exhausted"));
17399
17400 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17401 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17402 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17403 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17404 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17405 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17406 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17407 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17408 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17409 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17410 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17411 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17412 for (i = 0;
17413 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17414 i++)
17415 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17416 (PTR) (barrier_opt_names + i));
17417 #ifdef OBJ_ELF
17418 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17419 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17420 #endif
17421
17422 set_constant_flonums ();
17423
17424 /* Set the cpu variant based on the command-line options. We prefer
17425 -mcpu= over -march= if both are set (as for GCC); and we prefer
17426 -mfpu= over any other way of setting the floating point unit.
17427 Use of legacy options together with new options is reported as an error. */
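/* For example (illustrative): if both -mcpu= and -march= are given,
   legacy_cpu is unset, mcpu_cpu_opt is already non-NULL and the
   march_cpu_opt value is silently ignored by the logic below. */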
17428 if (legacy_cpu)
17429 {
17430 if (mcpu_cpu_opt || march_cpu_opt)
17431 as_bad (_("use of old and new-style options to set CPU type"));
17432
17433 mcpu_cpu_opt = legacy_cpu;
17434 }
17435 else if (!mcpu_cpu_opt)
17436 mcpu_cpu_opt = march_cpu_opt;
17437
17438 if (legacy_fpu)
17439 {
17440 if (mfpu_opt)
17441 as_bad (_("use of old and new-style options to set FPU type"));
17442
17443 mfpu_opt = legacy_fpu;
17444 }
17445 else if (!mfpu_opt)
17446 {
17447 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17448 /* Some environments specify a default FPU. If they don't, infer it
17449 from the processor. */
17450 if (mcpu_fpu_opt)
17451 mfpu_opt = mcpu_fpu_opt;
17452 else
17453 mfpu_opt = march_fpu_opt;
17454 #else
17455 mfpu_opt = &fpu_default;
17456 #endif
17457 }
17458
17459 if (!mfpu_opt)
17460 {
17461 if (!mcpu_cpu_opt)
17462 mfpu_opt = &fpu_default;
17463 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17464 mfpu_opt = &fpu_arch_vfp_v2;
17465 else
17466 mfpu_opt = &fpu_arch_fpa;
17467 }
17468
17469 #ifdef CPU_DEFAULT
17470 if (!mcpu_cpu_opt)
17471 {
17472 mcpu_cpu_opt = &cpu_default;
17473 selected_cpu = cpu_default;
17474 }
17475 #else
17476 if (mcpu_cpu_opt)
17477 selected_cpu = *mcpu_cpu_opt;
17478 else
17479 mcpu_cpu_opt = &arm_arch_any;
17480 #endif
17481
17482 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17483
17484 arm_arch_used = thumb_arch_used = arm_arch_none;
17485
17486 #if defined OBJ_COFF || defined OBJ_ELF
17487 {
17488 unsigned int flags = 0;
17489
17490 #if defined OBJ_ELF
17491 flags = meabi_flags;
17492
17493 switch (meabi_flags)
17494 {
17495 case EF_ARM_EABI_UNKNOWN:
17496 #endif
17497 /* Set the flags in the private structure. */
17498 if (uses_apcs_26) flags |= F_APCS26;
17499 if (support_interwork) flags |= F_INTERWORK;
17500 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17501 if (pic_code) flags |= F_PIC;
17502 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17503 flags |= F_SOFT_FLOAT;
17504
17505 switch (mfloat_abi_opt)
17506 {
17507 case ARM_FLOAT_ABI_SOFT:
17508 case ARM_FLOAT_ABI_SOFTFP:
17509 flags |= F_SOFT_FLOAT;
17510 break;
17511
17512 case ARM_FLOAT_ABI_HARD:
17513 if (flags & F_SOFT_FLOAT)
17514 as_bad (_("hard-float conflicts with specified fpu"));
17515 break;
17516 }
17517
17518 /* Using pure-endian doubles (even if soft-float). */
17519 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17520 flags |= F_VFP_FLOAT;
17521
17522 #if defined OBJ_ELF
17523 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17524 flags |= EF_ARM_MAVERICK_FLOAT;
17525 break;
17526
17527 case EF_ARM_EABI_VER4:
17528 case EF_ARM_EABI_VER5:
17529 /* No additional flags to set. */
17530 break;
17531
17532 default:
17533 abort ();
17534 }
17535 #endif
17536 bfd_set_private_flags (stdoutput, flags);
17537
17538 /* We have run out of flags in the COFF header to encode the
17539 status of ATPCS support, so instead we create a dummy,
17540 empty, debug section called .arm.atpcs. */
17541 if (atpcs)
17542 {
17543 asection * sec;
17544
17545 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17546
17547 if (sec != NULL)
17548 {
17549 bfd_set_section_flags
17550 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17551 bfd_set_section_size (stdoutput, sec, 0);
17552 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17553 }
17554 }
17555 }
17556 #endif
17557
17558 /* Record the CPU type as well. */
17559 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17560 mach = bfd_mach_arm_iWMMXt;
17561 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17562 mach = bfd_mach_arm_XScale;
17563 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17564 mach = bfd_mach_arm_ep9312;
17565 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17566 mach = bfd_mach_arm_5TE;
17567 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17568 {
17569 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17570 mach = bfd_mach_arm_5T;
17571 else
17572 mach = bfd_mach_arm_5;
17573 }
17574 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17575 {
17576 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17577 mach = bfd_mach_arm_4T;
17578 else
17579 mach = bfd_mach_arm_4;
17580 }
17581 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17582 mach = bfd_mach_arm_3M;
17583 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17584 mach = bfd_mach_arm_3;
17585 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17586 mach = bfd_mach_arm_2a;
17587 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17588 mach = bfd_mach_arm_2;
17589 else
17590 mach = bfd_mach_arm_unknown;
17591
17592 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17593 }
17594
17595 /* Command line processing. */
17596
17597 /* md_parse_option
17598 Invocation line includes a switch not recognized by the base assembler.
17599 See if it's a processor-specific option.
17600
17601 This routine is somewhat complicated by the need for backwards
17602 compatibility (since older releases of gcc can't be changed).
17603 The new options try to make the interface as compatible as
17604 possible with GCC.
17605
17606 New options (supported) are:
17607
17608 -mcpu=<cpu name> Assemble for selected processor
17609 -march=<architecture name> Assemble for selected architecture
17610 -mfpu=<fpu architecture> Assemble for selected FPU.
17611 -EB/-mbig-endian Big-endian
17612 -EL/-mlittle-endian Little-endian
17613 -k Generate PIC code
17614 -mthumb Start in Thumb mode
17615 -mthumb-interwork Code supports ARM/Thumb interworking
17616
17617 For now we will also provide support for:
17618
17619 -mapcs-32 32-bit Program counter
17620 -mapcs-26 26-bit Program counter
17621 -mapcs-float Floats passed in FP registers
17622 -mapcs-reentrant Reentrant code
17623 -matpcs
17624 (sometime these will probably be replaced with -mapcs=<list of options>
17625 and -matpcs=<list of options>)
17626
17627 The remaining options are only supported for backwards compatibility.
17628 Cpu variants, the arm part is optional:
17629 -m[arm]1 Currently not supported.
17630 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17631 -m[arm]3 Arm 3 processor
17632 -m[arm]6[xx], Arm 6 processors
17633 -m[arm]7[xx][t][[d]m] Arm 7 processors
17634 -m[arm]8[10] Arm 8 processors
17635 -m[arm]9[20][tdmi] Arm 9 processors
17636 -mstrongarm[110[0]] StrongARM processors
17637 -mxscale XScale processors
17638 -m[arm]v[2345[t[e]]] Arm architectures
17639 -mall All (except the ARM1)
17640 FP variants:
17641 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17642 -mfpe-old (No float load/store multiples)
17643 -mvfpxd VFP Single precision
17644 -mvfp All VFP
17645 -mno-fpu Disable all floating point instructions
17646
17647 The following CPU names are recognized:
17648 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17649 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17650 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
17651 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17652 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17653 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
17654 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17655
17656 */
17657
17658 const char * md_shortopts = "m:k";
17659
17660 #ifdef ARM_BI_ENDIAN
17661 #define OPTION_EB (OPTION_MD_BASE + 0)
17662 #define OPTION_EL (OPTION_MD_BASE + 1)
17663 #else
17664 #if TARGET_BYTES_BIG_ENDIAN
17665 #define OPTION_EB (OPTION_MD_BASE + 0)
17666 #else
17667 #define OPTION_EL (OPTION_MD_BASE + 1)
17668 #endif
17669 #endif
17670
17671 struct option md_longopts[] =
17672 {
17673 #ifdef OPTION_EB
17674 {"EB", no_argument, NULL, OPTION_EB},
17675 #endif
17676 #ifdef OPTION_EL
17677 {"EL", no_argument, NULL, OPTION_EL},
17678 #endif
17679 {NULL, no_argument, NULL, 0}
17680 };
17681
17682 size_t md_longopts_size = sizeof (md_longopts);
17683
17684 struct arm_option_table
17685 {
17686 char *option; /* Option name to match. */
17687 char *help; /* Help information. */
17688 int *var; /* Variable to change. */
17689 int value; /* What to change it to. */
17690 char *deprecated; /* If non-null, print this message. */
17691 };
17692
17693 struct arm_option_table arm_opts[] =
17694 {
17695 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17696 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17697 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17698 &support_interwork, 1, NULL},
17699 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17700 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17701 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17702 1, NULL},
17703 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17704 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17705 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17706 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17707 NULL},
17708
17709 /* These are recognized by the assembler, but have no effect on code. */
17710 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17711 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17712 {NULL, NULL, NULL, 0, NULL}
17713 };
17714
17715 struct arm_legacy_option_table
17716 {
17717 char *option; /* Option name to match. */
17718 const arm_feature_set **var; /* Variable to change. */
17719 const arm_feature_set value; /* What to change it to. */
17720 char *deprecated; /* If non-null, print this message. */
17721 };
17722
17723 const struct arm_legacy_option_table arm_legacy_opts[] =
17724 {
17725 /* DON'T add any new processors to this list -- we want the whole list
17726 to go away... Add them to the processors table instead. */
17727 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17728 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17729 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17730 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17731 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17732 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17733 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17734 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17735 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17736 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17737 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17738 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17739 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17740 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17741 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17742 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17743 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17744 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17745 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17746 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17747 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17748 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17749 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17750 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17751 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17752 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17753 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17754 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17755 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17756 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17757 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17758 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17759 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17760 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17761 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17762 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17763 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17764 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17765 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17766 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17767 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17768 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17769 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17770 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17771 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17772 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17773 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17774 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17775 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17776 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17777 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17778 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17779 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17780 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17781 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17782 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17783 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17784 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17785 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17786 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17787 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17788 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17789 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17790 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17791 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17792 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17793 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17794 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17795 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17796 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17797 N_("use -mcpu=strongarm110")},
17798 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17799 N_("use -mcpu=strongarm1100")},
17800 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17801 N_("use -mcpu=strongarm1110")},
17802 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17803 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17804 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17805
17806 /* Architecture variants -- don't add any more to this list either. */
17807 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17808 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17809 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17810 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17811 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17812 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17813 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17814 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17815 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17816 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17817 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17818 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17819 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17820 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17821 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17822 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17823 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17824 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17825
17826 /* Floating point variants -- don't add any more to this list either. */
17827 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17828 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17829 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17830 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17831 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17832
17833 {NULL, NULL, ARM_ARCH_NONE, NULL}
17834 };
17835
17836 struct arm_cpu_option_table
17837 {
17838 char *name;
17839 const arm_feature_set value;
17840 /* For some CPUs we assume an FPU unless the user explicitly sets
17841 -mfpu=... */
17842 const arm_feature_set default_fpu;
17843 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17844 case. */
17845 const char *canonical_name;
17846 };
17847
17848 /* This list should, at a minimum, contain all the cpu names
17849 recognized by GCC. */
17850 static const struct arm_cpu_option_table arm_cpus[] =
17851 {
17852 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17853 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17854 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17855 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17856 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17857 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17858 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17859 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17860 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17861 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17862 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17863 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17864 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17865 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17866 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17867 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17868 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17869 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17870 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17871 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17872 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17873 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17874 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17875 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17876 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17877 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17878 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17879 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17880 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17881 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17882 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17883 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17884 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17885 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17886 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17887 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17888 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17889 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17890 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17891 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17892 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17893 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17894 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17895 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17896 /* For V5 or later processors we default to using VFP; but the user
17897 should really set the FPU type explicitly. */
17898 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17899 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17900 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17901 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17902 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17903 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17904 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17905 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17906 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17907 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17908 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17909 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17910 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17911 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17912 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17913 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17914 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17915 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17916 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17917 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17918 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17919 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17920 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17921 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17922 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17923 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17924 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17925 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17926 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17927 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17928 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17929 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17930 | FPU_NEON_EXT_V1),
17931 NULL},
17932 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17933 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17934 /* ??? XSCALE is really an architecture. */
17935 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17936 /* ??? iwmmxt is not a processor. */
17937 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17938 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17939 /* Maverick */
17940 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17941 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17942 };
17943
17944 struct arm_arch_option_table
17945 {
17946 char *name;
17947 const arm_feature_set value;
17948 const arm_feature_set default_fpu;
17949 };
17950
17951 /* This list should, at a minimum, contain all the architecture names
17952 recognized by GCC. */
17953 static const struct arm_arch_option_table arm_archs[] =
17954 {
17955 {"all", ARM_ANY, FPU_ARCH_FPA},
17956 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17957 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17958 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17959 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17960 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17961 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17962 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17963 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17964 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17965 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17966 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17967 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17968 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17969 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17970 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17971 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17972 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17973 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17974 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17975 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17976 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17977 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17978 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17979 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17980 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17981 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17982 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17983 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17984 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17985 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17986 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17987 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17988 };
17989
17990 /* ISA extensions in the co-processor space. */
17991 struct arm_option_cpu_value_table
17992 {
17993 char *name;
17994 const arm_feature_set value;
17995 };
17996
17997 static const struct arm_option_cpu_value_table arm_extensions[] =
17998 {
17999 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
18000 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
18001 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
18002 {NULL, ARM_ARCH_NONE}
18003 };
18004
18005 /* This list should, at a minimum, contain all the fpu names
18006 recognized by GCC. */
18007 static const struct arm_option_cpu_value_table arm_fpus[] =
18008 {
18009 {"softfpa", FPU_NONE},
18010 {"fpe", FPU_ARCH_FPE},
18011 {"fpe2", FPU_ARCH_FPE},
18012 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
18013 {"fpa", FPU_ARCH_FPA},
18014 {"fpa10", FPU_ARCH_FPA},
18015 {"fpa11", FPU_ARCH_FPA},
18016 {"arm7500fe", FPU_ARCH_FPA},
18017 {"softvfp", FPU_ARCH_VFP},
18018 {"softvfp+vfp", FPU_ARCH_VFP_V2},
18019 {"vfp", FPU_ARCH_VFP_V2},
18020 {"vfp9", FPU_ARCH_VFP_V2},
18021 {"vfp3", FPU_ARCH_VFP_V3},
18022 {"vfp10", FPU_ARCH_VFP_V2},
18023 {"vfp10-r0", FPU_ARCH_VFP_V1},
18024 {"vfpxd", FPU_ARCH_VFP_V1xD},
18025 {"arm1020t", FPU_ARCH_VFP_V1},
18026 {"arm1020e", FPU_ARCH_VFP_V2},
18027 {"arm1136jfs", FPU_ARCH_VFP_V2},
18028 {"arm1136jf-s", FPU_ARCH_VFP_V2},
18029 {"maverick", FPU_ARCH_MAVERICK},
18030 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
18031 {NULL, ARM_ARCH_NONE}
18032 };
18033
18034 struct arm_option_value_table
18035 {
18036 char *name;
18037 long value;
18038 };
18039
18040 static const struct arm_option_value_table arm_float_abis[] =
18041 {
18042 {"hard", ARM_FLOAT_ABI_HARD},
18043 {"softfp", ARM_FLOAT_ABI_SOFTFP},
18044 {"soft", ARM_FLOAT_ABI_SOFT},
18045 {NULL, 0}
18046 };
18047
18048 #ifdef OBJ_ELF
18049 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
18050 static const struct arm_option_value_table arm_eabis[] =
18051 {
18052 {"gnu", EF_ARM_EABI_UNKNOWN},
18053 {"4", EF_ARM_EABI_VER4},
18054 {"5", EF_ARM_EABI_VER5},
18055 {NULL, 0}
18056 };
18057 #endif
18058
18059 struct arm_long_option_table
18060 {
18061 char * option; /* Substring to match. */
18062 char * help; /* Help information. */
18063 int (* func) (char * subopt); /* Function to decode sub-option. */
18064 char * deprecated; /* If non-null, print this message. */
18065 };
18066
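/* Parse a "+extension" suffix and merge the named extensions into *OPT_P.
   For example, given the string "+xscale+iwmmxt" this merges the XScale
   and iWMMXt coprocessor feature bits from the arm_extensions table above
   into a fresh copy of the current feature set.  */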
18067 static int
18068 arm_parse_extension (char * str, const arm_feature_set **opt_p)
18069 {
18070 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
18071
18072 /* Copy the feature set, so that we can modify it. */
18073 *ext_set = **opt_p;
18074 *opt_p = ext_set;
18075
18076 while (str != NULL && *str != 0)
18077 {
18078 const struct arm_option_cpu_value_table * opt;
18079 char * ext;
18080 int optlen;
18081
18082 if (*str != '+')
18083 {
18084 as_bad (_("invalid architectural extension"));
18085 return 0;
18086 }
18087
18088 str++;
18089 ext = strchr (str, '+');
18090
18091 if (ext != NULL)
18092 optlen = ext - str;
18093 else
18094 optlen = strlen (str);
18095
18096 if (optlen == 0)
18097 {
18098 as_bad (_("missing architectural extension"));
18099 return 0;
18100 }
18101
18102 for (opt = arm_extensions; opt->name != NULL; opt++)
18103 if (strncmp (opt->name, str, optlen) == 0)
18104 {
18105 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
18106 break;
18107 }
18108
18109 if (opt->name == NULL)
18110 {
18111 as_bad (_("unknown architectural extnsion `%s'"), str);
18112 return 0;
18113 }
18114
18115 str = ext;
18116 }
18117
18118 return 1;
18119 }
18120
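/* Handle -mcpu=<name>[+<extension>...].  For example, "-mcpu=xscale+iwmmxt"
   selects the XScale feature set and default FPU from the arm_cpus table
   and then hands the "+iwmmxt" suffix to arm_parse_extension above.  */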
18121 static int
18122 arm_parse_cpu (char * str)
18123 {
18124 const struct arm_cpu_option_table * opt;
18125 char * ext = strchr (str, '+');
18126 int optlen;
18127
18128 if (ext != NULL)
18129 optlen = ext - str;
18130 else
18131 optlen = strlen (str);
18132
18133 if (optlen == 0)
18134 {
18135 as_bad (_("missing cpu name `%s'"), str);
18136 return 0;
18137 }
18138
18139 for (opt = arm_cpus; opt->name != NULL; opt++)
18140 if (strncmp (opt->name, str, optlen) == 0)
18141 {
18142 mcpu_cpu_opt = &opt->value;
18143 mcpu_fpu_opt = &opt->default_fpu;
18144 if (opt->canonical_name)
18145 strcpy(selected_cpu_name, opt->canonical_name);
18146 else
18147 {
18148 int i;
18149 for (i = 0; i < optlen; i++)
18150 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18151 selected_cpu_name[i] = 0;
18152 }
18153
18154 if (ext != NULL)
18155 return arm_parse_extension (ext, &mcpu_cpu_opt);
18156
18157 return 1;
18158 }
18159
18160 as_bad (_("unknown cpu `%s'"), str);
18161 return 0;
18162 }
18163
18164 static int
18165 arm_parse_arch (char * str)
18166 {
18167 const struct arm_arch_option_table *opt;
18168 char *ext = strchr (str, '+');
18169 int optlen;
18170
18171 if (ext != NULL)
18172 optlen = ext - str;
18173 else
18174 optlen = strlen (str);
18175
18176 if (optlen == 0)
18177 {
18178 as_bad (_("missing architecture name `%s'"), str);
18179 return 0;
18180 }
18181
18182 for (opt = arm_archs; opt->name != NULL; opt++)
18183 if (streq (opt->name, str))
18184 {
18185 march_cpu_opt = &opt->value;
18186 march_fpu_opt = &opt->default_fpu;
18187 strcpy(selected_cpu_name, opt->name);
18188
18189 if (ext != NULL)
18190 return arm_parse_extension (ext, &march_cpu_opt);
18191
18192 return 1;
18193 }
18194
18195 as_bad (_("unknown architecture `%s'\n"), str);
18196 return 0;
18197 }
18198
18199 static int
18200 arm_parse_fpu (char * str)
18201 {
18202 const struct arm_option_cpu_value_table * opt;
18203
18204 for (opt = arm_fpus; opt->name != NULL; opt++)
18205 if (streq (opt->name, str))
18206 {
18207 mfpu_opt = &opt->value;
18208 return 1;
18209 }
18210
18211 as_bad (_("unknown floating point format `%s'\n"), str);
18212 return 0;
18213 }
18214
18215 static int
18216 arm_parse_float_abi (char * str)
18217 {
18218 const struct arm_option_value_table * opt;
18219
18220 for (opt = arm_float_abis; opt->name != NULL; opt++)
18221 if (streq (opt->name, str))
18222 {
18223 mfloat_abi_opt = opt->value;
18224 return 1;
18225 }
18226
18227 as_bad (_("unknown floating point abi `%s'\n"), str);
18228 return 0;
18229 }
18230
18231 #ifdef OBJ_ELF
18232 static int
18233 arm_parse_eabi (char * str)
18234 {
18235 const struct arm_option_value_table *opt;
18236
18237 for (opt = arm_eabis; opt->name != NULL; opt++)
18238 if (streq (opt->name, str))
18239 {
18240 meabi_flags = opt->value;
18241 return 1;
18242 }
18243 as_bad (_("unknown EABI `%s'\n"), str);
18244 return 0;
18245 }
18246 #endif
18247
18248 struct arm_long_option_table arm_long_opts[] =
18249 {
18250 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18251 arm_parse_cpu, NULL},
18252 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18253 arm_parse_arch, NULL},
18254 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18255 arm_parse_fpu, NULL},
18256 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18257 arm_parse_float_abi, NULL},
18258 #ifdef OBJ_ELF
18259 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18260 arm_parse_eabi, NULL},
18261 #endif
18262 {NULL, NULL, 0, NULL}
18263 };
18264
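/* Decode an ARM-specific command line option.  Single-character options
   are looked up in arm_opts and arm_legacy_opts; longer options are
   matched by prefix against arm_long_opts and the remainder of the
   argument is passed to the corresponding sub-option parser, so for
   example "-mfloat-abi=softfp" ends up calling arm_parse_float_abi
   with "softfp".  */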
18265 int
18266 md_parse_option (int c, char * arg)
18267 {
18268 struct arm_option_table *opt;
18269 const struct arm_legacy_option_table *fopt;
18270 struct arm_long_option_table *lopt;
18271
18272 switch (c)
18273 {
18274 #ifdef OPTION_EB
18275 case OPTION_EB:
18276 target_big_endian = 1;
18277 break;
18278 #endif
18279
18280 #ifdef OPTION_EL
18281 case OPTION_EL:
18282 target_big_endian = 0;
18283 break;
18284 #endif
18285
18286 case 'a':
18287 /* Listing option.  Just ignore these; we don't support any additional
18288 ones. */
18289 return 0;
18290
18291 default:
18292 for (opt = arm_opts; opt->option != NULL; opt++)
18293 {
18294 if (c == opt->option[0]
18295 && ((arg == NULL && opt->option[1] == 0)
18296 || streq (arg, opt->option + 1)))
18297 {
18298 #if WARN_DEPRECATED
18299 /* If the option is deprecated, tell the user. */
18300 if (opt->deprecated != NULL)
18301 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18302 arg ? arg : "", _(opt->deprecated));
18303 #endif
18304
18305 if (opt->var != NULL)
18306 *opt->var = opt->value;
18307
18308 return 1;
18309 }
18310 }
18311
18312 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18313 {
18314 if (c == fopt->option[0]
18315 && ((arg == NULL && fopt->option[1] == 0)
18316 || streq (arg, fopt->option + 1)))
18317 {
18318 #if WARN_DEPRECATED
18319 /* If the option is deprecated, tell the user. */
18320 if (fopt->deprecated != NULL)
18321 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18322 arg ? arg : "", _(fopt->deprecated));
18323 #endif
18324
18325 if (fopt->var != NULL)
18326 *fopt->var = &fopt->value;
18327
18328 return 1;
18329 }
18330 }
18331
18332 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18333 {
18334 /* These options are expected to have an argument. */
18335 if (c == lopt->option[0]
18336 && arg != NULL
18337 && strncmp (arg, lopt->option + 1,
18338 strlen (lopt->option + 1)) == 0)
18339 {
18340 #if WARN_DEPRECATED
18341 /* If the option is deprecated, tell the user. */
18342 if (lopt->deprecated != NULL)
18343 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18344 _(lopt->deprecated));
18345 #endif
18346
18347 /* Call the sub-option parser. */
18348 return lopt->func (arg + strlen (lopt->option) - 1);
18349 }
18350 }
18351
18352 return 0;
18353 }
18354
18355 return 1;
18356 }
18357
18358 void
18359 md_show_usage (FILE * fp)
18360 {
18361 struct arm_option_table *opt;
18362 struct arm_long_option_table *lopt;
18363
18364 fprintf (fp, _(" ARM-specific assembler options:\n"));
18365
18366 for (opt = arm_opts; opt->option != NULL; opt++)
18367 if (opt->help != NULL)
18368 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18369
18370 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18371 if (lopt->help != NULL)
18372 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18373
18374 #ifdef OPTION_EB
18375 fprintf (fp, _("\
18376 -EB assemble code for a big-endian cpu\n"));
18377 #endif
18378
18379 #ifdef OPTION_EL
18380 fprintf (fp, _("\
18381 -EL assemble code for a little-endian cpu\n"));
18382 #endif
18383 }
18384
18385
18386 #ifdef OBJ_ELF
18387 typedef struct
18388 {
18389 int val;
18390 arm_feature_set flags;
18391 } cpu_arch_ver_table;
18392
18393 /* Mapping from CPU features to EABI CPU arch values.  The table must be
18394 sorted with the least capable architectures first. */
18395 static const cpu_arch_ver_table cpu_arch_ver[] =
18396 {
18397 {1, ARM_ARCH_V4},
18398 {2, ARM_ARCH_V4T},
18399 {3, ARM_ARCH_V5},
18400 {4, ARM_ARCH_V5TE},
18401 {5, ARM_ARCH_V5TEJ},
18402 {6, ARM_ARCH_V6},
18403 {7, ARM_ARCH_V6Z},
18404 {8, ARM_ARCH_V6K},
18405 {9, ARM_ARCH_V6T2},
18406 {10, ARM_ARCH_V7A},
18407 {10, ARM_ARCH_V7R},
18408 {10, ARM_ARCH_V7M},
18409 {0, ARM_ARCH_NONE}
18410 };
18411
18412 /* Set the public EABI object attributes. */
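/* For example, assembling with ".cpu arm926ej-s" typically yields
   Tag_CPU_name "ARM926EJ-S" and Tag_CPU_arch 5 (ARMv5TEJ, from the
   cpu_arch_ver table above); the VFP, iWMMXt and Neon tags are only
   emitted when the corresponding instructions are actually used.  */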
18413 static void
18414 aeabi_set_public_attributes (void)
18415 {
18416 int arch;
18417 arm_feature_set flags;
18418 arm_feature_set tmp;
18419 const cpu_arch_ver_table *p;
18420
18421 /* Choose the architecture based on the capabilities of the requested cpu
18422 (if any) and/or the instructions actually used. */
18423 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18424 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18425 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18426
18427 tmp = flags;
18428 arch = 0;
18429 for (p = cpu_arch_ver; p->val; p++)
18430 {
18431 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18432 {
18433 arch = p->val;
18434 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18435 }
18436 }
18437
18438 /* Tag_CPU_name. */
18439 if (selected_cpu_name[0])
18440 {
18441 char *p;
18442
18443 p = selected_cpu_name;
18444 if (strncmp(p, "armv", 4) == 0)
18445 {
18446 int i;
18447
18448 p += 4;
18449 for (i = 0; p[i]; i++)
18450 p[i] = TOUPPER (p[i]);
18451 }
18452 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18453 }
18454 /* Tag_CPU_arch. */
18455 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18456 /* Tag_CPU_arch_profile. */
18457 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18458 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18459 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18460 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18461 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18462 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18463 /* Tag_ARM_ISA_use. */
18464 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18465 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18466 /* Tag_THUMB_ISA_use. */
18467 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18468 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18469 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18470 /* Tag_VFP_arch. */
18471 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18472 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18473 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18474 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18475 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18476 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18477 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18478 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18479 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18480 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18481 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18482 /* Tag_WMMX_arch. */
18483 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18484 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18485 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18486 /* Tag_NEON_arch. */
18487 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18488 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18489 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18490 }
18491
18492 /* Add the .ARM.attributes section. */
18493 void
18494 arm_md_end (void)
18495 {
18496 segT s;
18497 char *p;
18498 addressT addr;
18499 offsetT size;
18500
18501 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18502 return;
18503
18504 aeabi_set_public_attributes ();
18505 size = elf32_arm_eabi_attr_size (stdoutput);
18506 s = subseg_new (".ARM.attributes", 0);
18507 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18508 addr = frag_now_fix ();
18509 p = frag_more (size);
18510 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18511 }
18512 #endif /* OBJ_ELF */
18513
18514
18515 /* Parse a .cpu directive. */
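/* For example, ".cpu arm920t" switches the assembler to the ARM920T
   feature set (ARMv4T) for the code that follows, much as "-mcpu=arm920t"
   would on the command line.  */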
18516
18517 static void
18518 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18519 {
18520 const struct arm_cpu_option_table *opt;
18521 char *name;
18522 char saved_char;
18523
18524 name = input_line_pointer;
18525 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18526 input_line_pointer++;
18527 saved_char = *input_line_pointer;
18528 *input_line_pointer = 0;
18529
18530 /* Skip the first "all" entry. */
18531 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18532 if (streq (opt->name, name))
18533 {
18534 mcpu_cpu_opt = &opt->value;
18535 selected_cpu = opt->value;
18536 if (opt->canonical_name)
18537 strcpy(selected_cpu_name, opt->canonical_name);
18538 else
18539 {
18540 int i;
18541 for (i = 0; opt->name[i]; i++)
18542 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18543 selected_cpu_name[i] = 0;
18544 }
18545 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18546 *input_line_pointer = saved_char;
18547 demand_empty_rest_of_line ();
18548 return;
18549 }
18550 as_bad (_("unknown cpu `%s'"), name);
18551 *input_line_pointer = saved_char;
18552 ignore_rest_of_line ();
18553 }
18554
18555
18556 /* Parse a .arch directive. */
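/* For example, ".arch armv5te" selects the ARMv5TE feature set for the
   code that follows, analogous to "-march=armv5te" on the command line.  */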
18557
18558 static void
18559 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18560 {
18561 const struct arm_arch_option_table *opt;
18562 char saved_char;
18563 char *name;
18564
18565 name = input_line_pointer;
18566 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18567 input_line_pointer++;
18568 saved_char = *input_line_pointer;
18569 *input_line_pointer = 0;
18570
18571 /* Skip the first "all" entry. */
18572 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18573 if (streq (opt->name, name))
18574 {
18575 mcpu_cpu_opt = &opt->value;
18576 selected_cpu = opt->value;
18577 strcpy(selected_cpu_name, opt->name);
18578 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18579 *input_line_pointer = saved_char;
18580 demand_empty_rest_of_line ();
18581 return;
18582 }
18583
18584 as_bad (_("unknown architecture `%s'\n"), name);
18585 *input_line_pointer = saved_char;
18586 ignore_rest_of_line ();
18587 }
18588
18589
18590 /* Parse a .fpu directive. */
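/* For example, ".fpu softvfp+vfp" or ".fpu vfp" selects the VFPv2 feature
   set from the arm_fpus table and merges it into the active cpu_variant.  */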
18591
18592 static void
18593 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18594 {
18595 const struct arm_option_cpu_value_table *opt;
18596 char saved_char;
18597 char *name;
18598
18599 name = input_line_pointer;
18600 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18601 input_line_pointer++;
18602 saved_char = *input_line_pointer;
18603 *input_line_pointer = 0;
18604
18605 for (opt = arm_fpus; opt->name != NULL; opt++)
18606 if (streq (opt->name, name))
18607 {
18608 mfpu_opt = &opt->value;
18609 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18610 *input_line_pointer = saved_char;
18611 demand_empty_rest_of_line ();
18612 return;
18613 }
18614
18615 as_bad (_("unknown floating point format `%s'\n"), name);
18616 *input_line_pointer = saved_char;
18617 ignore_rest_of_line ();
18618 }
18619