9872968b627b33f68a00c5eaccd3e3d078fd30a0
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
static struct
{
  /* Symbol marking where the current function's unwinding started
     (presumably set by a .fnstart-style directive -- confirm).  */
  symbolS * proc_start;
  /* Symbol for the function's unwind table entry, if one was made.  */
  symbolS * table_entry;
  /* Explicitly-named personality routine, when one was given.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine; see also
     marked_pr_dependency below, which tracks __aeabi_unwind_cpp_prN.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
81
82 /* Bit N indicates that an R_ARM_NONE relocation has been output for
83 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parse failed and, per the name, callers should not backtrack and
     try an alternative operand interpretation.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
224 static int mfloat_abi_opt = -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name[16];
229 #ifdef OBJ_ELF
230 # ifdef EABI_DEFAULT
231 static int meabi_flags = EABI_DEFAULT;
232 # else
233 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
234 # endif
235
236 bfd_boolean
237 arm_is_eabi(void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
276
277 static bfd_boolean unified_syntax = FALSE;
278
279 enum neon_el_type
280 {
281 NT_invtype,
282 NT_untyped,
283 NT_integer,
284 NT_float,
285 NT_poly,
286 NT_signed,
287 NT_unsigned
288 };
289
290 struct neon_type_el
291 {
292 enum neon_el_type type;
293 unsigned size;
294 };
295
296 #define NEON_MAX_TYPE_ELS 4
297
298 struct neon_type
299 {
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
301 unsigned elems;
302 };
303
/* Working state for the instruction currently being assembled
   (instantiated once below as `inst').  */
struct arm_it
{
  /* Diagnostic recorded on any parse/encode failure, else NULL.  */
  const char * error;
  /* Binary encoding built up for the instruction.  */
  unsigned long instruction;
  /* Instruction size in bytes, and any size explicitly requested by
     the programmer (e.g. via a .n/.w suffix) -- confirm exact use.  */
  int size;
  int size_req;
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to emit against the instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* One slot per parsed operand; see struct asm_opcode's operands[8]
     for the expected operand kinds.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;  /* Operand present.  */
    unsigned isreg : 1;  /* Operand was a register.  */
    unsigned immisreg : 1;  /* .imm field is a second register.  */
    unsigned isscalar : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;  /* Operand has relocation suffix.  */
    unsigned writeback : 1;  /* Operand has trailing !  */
    unsigned preind : 1;  /* Preindexed address.  */
    unsigned postind : 1;  /* Postindexed address.  */
    unsigned negative : 1;  /* Index register was negated.  */
    unsigned shifted : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[6];
};
351
352 static struct arm_it inst;
353
354 #define NUM_FLOAT_VALS 8
355
356 const char * fp_const[] =
357 {
358 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
359 };
360
361 /* Number of littlenums required to hold an extended precision number. */
362 #define MAX_LITTLENUMS 6
363
364 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
365
366 #define FAIL (-1)
367 #define SUCCESS (0)
368
369 #define SUFF_S 1
370 #define SUFF_D 2
371 #define SUFF_E 3
372 #define SUFF_P 4
373
374 #define CP_T_X 0x00008000
375 #define CP_T_Y 0x00400000
376
377 #define CONDS_BIT 0x00100000
378 #define LOAD_BIT 0x00100000
379
380 #define DOUBLE_LOAD_FLAG 0x00000001
381
382 struct asm_cond
383 {
384 const char * template;
385 unsigned long value;
386 };
387
388 #define COND_ALWAYS 0xE
389
390 struct asm_psr
391 {
392 const char *template;
393 unsigned long field;
394 };
395
396 struct asm_barrier_opt
397 {
398 const char *template;
399 unsigned long value;
400 };
401
402 /* The bit that distinguishes CPSR and SPSR. */
403 #define SPSR_BIT (1 << 22)
404
405 /* The individual PSR flag bits. */
406 #define PSR_c (1 << 16)
407 #define PSR_x (1 << 17)
408 #define PSR_s (1 << 18)
409 #define PSR_f (1 << 19)
410
411 struct reloc_entry
412 {
413 char *name;
414 bfd_reloc_code_real_type reloc;
415 };
416
417 enum vfp_reg_pos
418 {
419 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
420 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
421 };
422
423 enum vfp_ldstm_type
424 {
425 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
426 };
427
428 /* Bits for DEFINED field in neon_typed_alias. */
429 #define NTA_HASTYPE 1
430 #define NTA_HASINDEX 2
431
432 struct neon_typed_alias
433 {
434 unsigned char defined;
435 unsigned char index;
436 struct neon_type_el eltype;
437 };
438
439 /* ARM register categories. This includes coprocessor numbers and various
440 architecture extensions' registers. */
enum arm_reg_type
{
  /* Keep this order in sync with reg_expected_msgs[] below, which is
     indexed by these values.  */
  REG_TYPE_RN,		/* Core ARM register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
};
465
466 /* Structure for a hash table entry for a register.
467 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
468 information which states whether a vector type or index is specified (for a
469 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
470 struct reg_entry
471 {
472 const char *name;
473 unsigned char number;
474 unsigned char type;
475 unsigned char builtin;
476 struct neon_typed_alias *neon;
477 };
478
479 /* Diagnostics used when we don't get a register of the expected type. */
const char *const reg_expected_msgs[] =
{
  /* Indexed by enum arm_reg_type; the order here must match that
     enumeration exactly.  */
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
504
505 /* Some well known registers that we refer to directly elsewhere. */
506 #define REG_SP 13
507 #define REG_LR 14
508 #define REG_PC 15
509
510 /* ARM instructions take 4bytes in the object file, Thumb instructions
511 take 2: */
512 #define INSN_SIZE 4
513
514 struct asm_opcode
515 {
516 /* Basic string to match. */
517 const char *template;
518
519 /* Parameters to instruction. */
520 unsigned char operands[8];
521
522 /* Conditional tag - see opcode_lookup. */
523 unsigned int tag : 4;
524
525 /* Basic instruction code. */
526 unsigned int avalue : 28;
527
528 /* Thumb-format instruction code. */
529 unsigned int tvalue;
530
531 /* Which architecture variant provides this instruction. */
532 const arm_feature_set *avariant;
533 const arm_feature_set *tvariant;
534
535 /* Function to call to encode instruction in ARM format. */
536 void (* aencode) (void);
537
538 /* Function to call to encode instruction in Thumb format. */
539 void (* tencode) (void);
540 };
541
542 /* Defines for various bits that we will want to toggle. */
543 #define INST_IMMEDIATE 0x02000000
544 #define OFFSET_REG 0x02000000
545 #define HWOFFSET_IMM 0x00400000
546 #define SHIFT_BY_REG 0x00000010
547 #define PRE_INDEX 0x01000000
548 #define INDEX_UP 0x00800000
549 #define WRITE_BACK 0x00200000
550 #define LDM_TYPE_2_OR_3 0x00400000
551 #define CPSI_MMOD 0x00020000
552
553 #define LITERAL_MASK 0xf000f000
554 #define OPCODE_MASK 0xfe1fffff
555 #define V4_STR_BIT 0x00000020
556
557 #define DATA_OP_SHIFT 21
558
559 #define T2_OPCODE_MASK 0xfe1fffff
560 #define T2_DATA_OP_SHIFT 21
561
562 /* Codes to distinguish the arithmetic instructions. */
563 #define OPCODE_AND 0
564 #define OPCODE_EOR 1
565 #define OPCODE_SUB 2
566 #define OPCODE_RSB 3
567 #define OPCODE_ADD 4
568 #define OPCODE_ADC 5
569 #define OPCODE_SBC 6
570 #define OPCODE_RSC 7
571 #define OPCODE_TST 8
572 #define OPCODE_TEQ 9
573 #define OPCODE_CMP 10
574 #define OPCODE_CMN 11
575 #define OPCODE_ORR 12
576 #define OPCODE_MOV 13
577 #define OPCODE_BIC 14
578 #define OPCODE_MVN 15
579
580 #define T2_OPCODE_AND 0
581 #define T2_OPCODE_BIC 1
582 #define T2_OPCODE_ORR 2
583 #define T2_OPCODE_ORN 3
584 #define T2_OPCODE_EOR 4
585 #define T2_OPCODE_ADD 8
586 #define T2_OPCODE_ADC 10
587 #define T2_OPCODE_SBC 11
588 #define T2_OPCODE_SUB 13
589 #define T2_OPCODE_RSB 14
590
591 #define T_OPCODE_MUL 0x4340
592 #define T_OPCODE_TST 0x4200
593 #define T_OPCODE_CMN 0x42c0
594 #define T_OPCODE_NEG 0x4240
595 #define T_OPCODE_MVN 0x43c0
596
597 #define T_OPCODE_ADD_R3 0x1800
598 #define T_OPCODE_SUB_R3 0x1a00
599 #define T_OPCODE_ADD_HI 0x4400
600 #define T_OPCODE_ADD_ST 0xb000
601 #define T_OPCODE_SUB_ST 0xb080
602 #define T_OPCODE_ADD_SP 0xa800
603 #define T_OPCODE_ADD_PC 0xa000
604 #define T_OPCODE_ADD_I8 0x3000
605 #define T_OPCODE_SUB_I8 0x3800
606 #define T_OPCODE_ADD_I3 0x1c00
607 #define T_OPCODE_SUB_I3 0x1e00
608
609 #define T_OPCODE_ASR_R 0x4100
610 #define T_OPCODE_LSL_R 0x4080
611 #define T_OPCODE_LSR_R 0x40c0
612 #define T_OPCODE_ROR_R 0x41c0
613 #define T_OPCODE_ASR_I 0x1000
614 #define T_OPCODE_LSL_I 0x0000
615 #define T_OPCODE_LSR_I 0x0800
616
617 #define T_OPCODE_MOV_I8 0x2000
618 #define T_OPCODE_CMP_I8 0x2800
619 #define T_OPCODE_CMP_LR 0x4280
620 #define T_OPCODE_MOV_HR 0x4600
621 #define T_OPCODE_CMP_HR 0x4500
622
623 #define T_OPCODE_LDR_PC 0x4800
624 #define T_OPCODE_LDR_SP 0x9800
625 #define T_OPCODE_STR_SP 0x9000
626 #define T_OPCODE_LDR_IW 0x6800
627 #define T_OPCODE_STR_IW 0x6000
628 #define T_OPCODE_LDR_IH 0x8800
629 #define T_OPCODE_STR_IH 0x8000
630 #define T_OPCODE_LDR_IB 0x7800
631 #define T_OPCODE_STR_IB 0x7000
632 #define T_OPCODE_LDR_RW 0x5800
633 #define T_OPCODE_STR_RW 0x5000
634 #define T_OPCODE_LDR_RH 0x5a00
635 #define T_OPCODE_STR_RH 0x5200
636 #define T_OPCODE_LDR_RB 0x5c00
637 #define T_OPCODE_STR_RB 0x5400
638
639 #define T_OPCODE_PUSH 0xb400
640 #define T_OPCODE_POP 0xbc00
641
642 #define T_OPCODE_BRANCH 0xe000
643
644 #define THUMB_SIZE 2 /* Size of thumb instruction. */
645 #define THUMB_PP_PC_LR 0x0100
646 #define THUMB_LOAD_BIT 0x0800
647 #define THUMB2_LOAD_BIT 0x00100000
648
/* Canned diagnostics assigned to inst.error.  Because they expand in
   expression context (`inst.error = BAD_xxx;'), none may carry its own
   trailing semicolon.  FIXED: BAD_ADDR_MODE previously ended in a stray
   `;', which would break any non-statement use of the macro.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
659
660 static struct hash_control *arm_ops_hsh;
661 static struct hash_control *arm_cond_hsh;
662 static struct hash_control *arm_shift_hsh;
663 static struct hash_control *arm_psr_hsh;
664 static struct hash_control *arm_v7m_psr_hsh;
665 static struct hash_control *arm_reg_hsh;
666 static struct hash_control *arm_reloc_hsh;
667 static struct hash_control *arm_barrier_opt_hsh;
668
669 /* Stuff needed to resolve the label ambiguity
670 As:
671 ...
672 label: <insn>
673 may differ from:
674 ...
675 label:
676 <insn>
677 */
678
679 symbolS * last_label_seen;
680 static int label_is_thumb_function_name = FALSE;
681 \f
682 /* Literal pool structure. Held on a per-section
683 and per-sub-section basis. */
684
#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* Pending literal expressions awaiting a pool dump.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identifier (presumably used to name SYMBOL -- confirm).  */
  unsigned int id;
  symbolS * symbol;
  /* Owning section/subsection; pools are kept per-subsection.  */
  segT section;
  subsegT sub_section;
  /* Next pool in the list_of_pools chain.  */
  struct literal_pool * next;
} literal_pool;
696
697 /* Pointer to a linked list of literal pools. */
698 literal_pool * list_of_pools = NULL;
699
/* State variables for IT block handling.  */
/* NOTE(review): the name suggests a multi-bit mask rather than a flag,
   so declare it as a plain int instead of bfd_boolean (bfd_boolean is
   an int typedef, so storage is unchanged) -- confirm against the IT
   handling code.  */
static int current_it_mask = 0;
static int current_cc;
703
704 \f
705 /* Pure syntax. */
706
707 /* This array holds the chars that always start a comment. If the
708 pre-processor is disabled, these aren't very useful. */
709 const char comment_chars[] = "@";
710
711 /* This array holds the chars that only start a comment at the beginning of
712 a line. If the line seems to have the form '# 123 filename'
713 .line and .file directives will appear in the pre-processed output. */
714 /* Note that input_file.c hand checks for '#' at the beginning of the
715 first line of the input file. This is because the compiler outputs
716 #NO_APP at the beginning of its output. */
717 /* Also note that comments like this one will always work. */
718 const char line_comment_chars[] = "#";
719
720 const char line_separator_chars[] = ";";
721
722 /* Chars that can be used to separate mant
723 from exp in floating point numbers. */
724 const char EXP_CHARS[] = "eE";
725
726 /* Chars that mean this number is a floating point constant. */
727 /* As in 0f12.456 */
728 /* or 0d1.2345e12 */
729
730 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
731
732 /* Prefix characters that indicate the start of an immediate
733 value. */
734 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
735
736 /* Separator character handling. */
737
738 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
739
740 static inline int
741 skip_past_char (char ** str, char c)
742 {
743 if (**str == c)
744 {
745 (*str)++;
746 return SUCCESS;
747 }
748 else
749 return FAIL;
750 }
751 #define skip_past_comma(str) skip_past_char (str, ',')
752
753 /* Arithmetic expressions (possibly involving symbols). */
754
755 /* Return TRUE if anything in the expression is a bignum. */
756
757 static int
758 walk_no_bignums (symbolS * sp)
759 {
760 if (symbol_get_value_expression (sp)->X_op == O_big)
761 return 1;
762
763 if (symbol_get_value_expression (sp)->X_add_symbol)
764 {
765 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
766 || (symbol_get_value_expression (sp)->X_op_symbol
767 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
768 }
769
770 return 0;
771 }
772
773 static int in_my_get_expression = 0;
774
775 /* Third argument to my_get_expression. */
776 #define GE_NO_PREFIX 0
777 #define GE_IMM_PREFIX 1
778 #define GE_OPT_PREFIX 2
779 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
780 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
781 #define GE_OPT_PREFIX_BIG 3
782
/* Parse an expression from *STR into EP.  PREFIX_MODE is one of the
   GE_* values above: GE_IMM_PREFIX requires a leading '#'/'$',
   GE_OPT_PREFIX and GE_OPT_PREFIX_BIG accept one optionally, and
   GE_OPT_PREFIX_BIG additionally permits bignum constants.  On success
   returns 0 with *STR advanced past the expression; on failure sets
   inst.error and returns nonzero.
   NOTE(review): the failure paths mix `return FAIL' (-1) and
   `return 1'; callers presumably only test for nonzero, but the
   inconsistency is worth confirming.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our string.  The
     in_my_get_expression flag lets md_operand() mark unparsable
     operands as O_illegal for us.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: report how far we parsed and restore the caller's
     input_line_pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
865
866 /* Turn a string in input_line_pointer into a floating point constant
867 of type TYPE, and store the appropriate bytes in *LITP. The number
868 of LITTLENUMS emitted is stored in *SIZEP. An error message is
869 returned, or NULL on OK.
870
871 Note that fp constants aren't represent in the normal way on the ARM.
872 In big endian mode, things are as expected. However, in little endian
873 mode fp constants are big-endian word-wise, and little-endian byte-wise
874 within the words. For example, (double) 1.1 in big endian mode is
875 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
876 the byte sequence 99 99 f1 3f 9a 99 99 99.
877
878 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
879
880 char *
881 md_atof (int type, char * litP, int * sizeP)
882 {
883 int prec;
884 LITTLENUM_TYPE words[MAX_LITTLENUMS];
885 char *t;
886 int i;
887
888 switch (type)
889 {
890 case 'f':
891 case 'F':
892 case 's':
893 case 'S':
894 prec = 2;
895 break;
896
897 case 'd':
898 case 'D':
899 case 'r':
900 case 'R':
901 prec = 4;
902 break;
903
904 case 'x':
905 case 'X':
906 prec = 6;
907 break;
908
909 case 'p':
910 case 'P':
911 prec = 6;
912 break;
913
914 default:
915 *sizeP = 0;
916 return _("bad call to MD_ATOF()");
917 }
918
919 t = atof_ieee (input_line_pointer, type, words);
920 if (t)
921 input_line_pointer = t;
922 *sizeP = prec * 2;
923
924 if (target_big_endian)
925 {
926 for (i = 0; i < prec; i++)
927 {
928 md_number_to_chars (litP, (valueT) words[i], 2);
929 litP += 2;
930 }
931 }
932 else
933 {
934 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
935 for (i = prec - 1; i >= 0; i--)
936 {
937 md_number_to_chars (litP, (valueT) words[i], 2);
938 litP += 2;
939 }
940 else
941 /* For a 4 byte float the order of elements in `words' is 1 0.
942 For an 8 byte float the order is 1 0 3 2. */
943 for (i = 0; i < prec; i += 2)
944 {
945 md_number_to_chars (litP, (valueT) words[i + 1], 2);
946 md_number_to_chars (litP + 2, (valueT) words[i], 2);
947 litP += 4;
948 }
949 }
950
951 return 0;
952 }
953
954 /* We handle all bad expressions here, so that we can report the faulty
955 instruction in the error message. */
956 void
957 md_operand (expressionS * expr)
958 {
959 if (in_my_get_expression)
960 expr->X_op = O_illegal;
961 }
962
963 /* Immediate values. */
964
965 /* Generic immediate-value read function for use in directives.
966 Accepts anything that 'expression' can fold to a constant.
967 *val receives the number. */
968 #ifdef OBJ_ELF
969 static int
970 immediate_for_directive (int *val)
971 {
972 expressionS exp;
973 exp.X_op = O_illegal;
974
975 if (is_immediate_prefix (*input_line_pointer))
976 {
977 input_line_pointer++;
978 expression (&exp);
979 }
980
981 if (exp.X_op != O_constant)
982 {
983 as_bad (_("expected #constant"));
984 ignore_rest_of_line ();
985 return FAIL;
986 }
987 *val = exp.X_add_number;
988 return SUCCESS;
989 }
990 #endif
991
992 /* Register parsing. */
993
994 /* Generic register parser. CCP points to what should be the
995 beginning of a register name. If it is indeed a valid register
996 name, advance CCP over it and return the reg_entry structure;
997 otherwise return NULL. Does not issue diagnostics. */
998
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  /* Targets defining REGISTER_PREFIX require it on every register.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  /* An optional prefix is simply skipped when present.  */
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* Register names look like identifiers: a letter followed by
     letters, digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input once we know it names a register.  */
  *ccp = p;
  return reg;
}
1032
/* Try the alternative spellings accepted for register class TYPE.
   START is the unconsumed input, REG the entry (possibly NULL) found
   by the generic parser.  Returns a register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): control falls through into the REG_TYPE_MMXWC case
	 when the number parse fails, so a CP operand would also accept an
	 iWMMXt WCG register name.  That looks unintentional -- confirm
	 whether a `break' is missing here.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1070
1071 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1072 return value is the register number or FAIL. */
1073
1074 static int
1075 arm_reg_parse (char **ccp, enum arm_reg_type type)
1076 {
1077 char *start = *ccp;
1078 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1079 int ret;
1080
1081 /* Do not allow a scalar (reg+index) to parse as a register. */
1082 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1083 return FAIL;
1084
1085 if (reg && reg->type == type)
1086 return reg->number;
1087
1088 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1089 return ret;
1090
1091 *ccp = start;
1092 return FAIL;
1093 }
1094
1095 /* Parse a Neon type specifier. *STR should point at the leading '.'
1096 character. Does no verification at this stage that the type fits the opcode
1097 properly. E.g.,
1098
1099 .i32.i32.s16
1100 .s32.f32
1101 .u16
1102
1103 Can all be legally parsed by this function.
1104
1105 Fills in neon_type struct pointer with parsed information, and updates STR
1106 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1107 type, FAIL if not. */
1108
1109 static int
1110 parse_neon_type (struct neon_type *type, char **str)
1111 {
1112 char *ptr = *str;
1113
1114 if (type)
1115 type->elems = 0;
1116
1117 while (type->elems < NEON_MAX_TYPE_ELS)
1118 {
1119 enum neon_el_type thistype = NT_untyped;
1120 unsigned thissize = -1u;
1121
1122 if (*ptr != '.')
1123 break;
1124
1125 ptr++;
1126
1127 /* Just a size without an explicit type. */
1128 if (ISDIGIT (*ptr))
1129 goto parsesize;
1130
1131 switch (TOLOWER (*ptr))
1132 {
1133 case 'i': thistype = NT_integer; break;
1134 case 'f': thistype = NT_float; break;
1135 case 'p': thistype = NT_poly; break;
1136 case 's': thistype = NT_signed; break;
1137 case 'u': thistype = NT_unsigned; break;
1138 case 'd':
1139 thistype = NT_float;
1140 thissize = 64;
1141 ptr++;
1142 goto done;
1143 default:
1144 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1145 return FAIL;
1146 }
1147
1148 ptr++;
1149
1150 /* .f is an abbreviation for .f32. */
1151 if (thistype == NT_float && !ISDIGIT (*ptr))
1152 thissize = 32;
1153 else
1154 {
1155 parsesize:
1156 thissize = strtoul (ptr, &ptr, 10);
1157
1158 if (thissize != 8 && thissize != 16 && thissize != 32
1159 && thissize != 64)
1160 {
1161 as_bad (_("bad size %d in type specifier"), thissize);
1162 return FAIL;
1163 }
1164 }
1165
1166 done:
1167 if (type)
1168 {
1169 type->el[type->elems].type = thistype;
1170 type->el[type->elems].size = thissize;
1171 type->elems++;
1172 }
1173 }
1174
1175 /* Empty/missing type is not a successful parse. */
1176 if (type->elems == 0)
1177 return FAIL;
1178
1179 *str = ptr;
1180
1181 return SUCCESS;
1182 }
1183
1184 /* Errors may be set multiple times during parsing or bit encoding
1185 (particularly in the Neon bits), but usually the earliest error which is set
1186 will be the most meaningful. Avoid overwriting it with later (cascading)
1187 errors by calling this function. */
1188
1189 static void
1190 first_error (const char *err)
1191 {
1192 if (!inst.error)
1193 inst.error = err;
1194 }
1195
1196 /* Parse a single type, e.g. ".s32", leading period included. */
1197 static int
1198 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1199 {
1200 char *str = *ccp;
1201 struct neon_type optype;
1202
1203 if (*str == '.')
1204 {
1205 if (parse_neon_type (&optype, &str) == SUCCESS)
1206 {
1207 if (optype.elems == 1)
1208 *vectype = optype.el[0];
1209 else
1210 {
1211 first_error (_("only one type should be specified for operand"));
1212 return FAIL;
1213 }
1214 }
1215 else
1216 {
1217 first_error (_("vector type expected"));
1218 return FAIL;
1219 }
1220 }
1221 else
1222 return FAIL;
1223
1224 *ccp = str;
1225
1226 return SUCCESS;
1227 }
1228
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer. */

#define NEON_ALL_LANES 15
#define NEON_INTERLEAVE_LANES 14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO. */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty alias: no type, no index.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      /* Only consume input on success; ATYPE stays empty either way.  */
      if (altreg != FAIL)
        *ccp = str;
      if (typeinfo)
        *typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
              || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
          && (reg->type == REG_TYPE_MMXWCG)))
    type = reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by a .dn/.qn
     alias directive.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may only be
     given when the alias did not already carry a type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
        {
          first_error (_("can't redefine type for operand"));
          return FAIL;
        }
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* An optional "[index]" or "[]" suffix makes this a scalar.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
        {
          first_error (_("only D registers may be indexed"));
          return FAIL;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          first_error (_("can't change index for operand"));
          return FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes; otherwise a constant lane number.  */
      if (skip_past_char (&str, ']') == SUCCESS)
        atype.index = NEON_ALL_LANES;
      else
        {
          expressionS exp;

          my_get_expression (&exp, &str, GE_NO_PREFIX);

          if (exp.X_op != O_constant)
            {
              first_error (_("constant expression required"));
              return FAIL;
            }

          if (skip_past_char (&str, ']') == FAIL)
            return FAIL;

          atype.index = exp.X_add_number;
        }
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1343
1344 /* Like arm_reg_parse, but allow allow the following extra features:
1345 - If RTYPE is non-zero, return the (possibly restricted) type of the
1346 register (e.g. Neon double or quad reg when either has been requested).
1347 - If this is a Neon vector type with additional type information, fill
1348 in the struct pointed to by VECTYPE (if non-NULL).
1349 This function will fault on encountering a scalar.
1350 */
1351
1352 static int
1353 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1354 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1355 {
1356 struct neon_typed_alias atype;
1357 char *str = *ccp;
1358 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1359
1360 if (reg == FAIL)
1361 return FAIL;
1362
1363 /* Do not allow a scalar (reg+index) to parse as a register. */
1364 if ((atype.defined & NTA_HASINDEX) != 0)
1365 {
1366 first_error (_("register operand expected, but got scalar"));
1367 return FAIL;
1368 }
1369
1370 if (vectype)
1371 *vectype = atype.eltype;
1372
1373 *ccp = str;
1374
1375 return reg;
1376 }
1377
1378 #define NEON_SCALAR_REG(X) ((X) >> 4)
1379 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1380
1381 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1382 have enough information to be able to do a good job bounds-checking. So, we
1383 just do easy checks here, and do further checks later. */
1384
1385 static int
1386 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1387 {
1388 int reg;
1389 char *str = *ccp;
1390 struct neon_typed_alias atype;
1391
1392 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1393
1394 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1395 return FAIL;
1396
1397 if (atype.index == NEON_ALL_LANES)
1398 {
1399 first_error (_("scalar must have an index"));
1400 return FAIL;
1401 }
1402 else if (atype.index >= 64 / elsize)
1403 {
1404 first_error (_("scalar index out of range"));
1405 return FAIL;
1406 }
1407
1408 if (type)
1409 *type = atype.eltype;
1410
1411 *ccp = str;
1412
1413 return reg * 16 + atype.index;
1414 }
1415
/* Parse an ARM register list. Returns the bitmask, or FAIL.
   Accepts either a brace-enclosed list of core registers and ranges,
   e.g. "{r0-r3, r7}", or a bare mask expression; several such groups
   may be joined with '+' or '|'.  A non-constant mask expression is
   deferred via a BFD_RELOC_ARM_MULTI relocation.  */
static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long range = 0;
  int another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'. */
  do
    {
      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      /* Closing the range "rA-rB": fill in the registers
		 strictly between the two endpoints.  */
	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* Loop on ',' or '-'; the comma operand comes first, and the
	     '-' arm deliberately sets IN_RANGE and consumes one char.
	     The str-- below undoes the str++ when the loop exits on a
	     character that is neither.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (*str++ != '}')
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  /* Not a brace list: a mask expression.  */
	  expressionS expr;

	  if (my_get_expression (&expr, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (expr.X_op == O_constant)
	    {
	      /* Only bits 0-15 (the core registers) may be set.  */
	      if (expr.X_add_number
		  != (expr.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & expr.X_add_number) != 0)
		{
		  /* NOTE(review): REGNO here is derived from the
		     overlapping bits, not a register number as the
		     message suggests -- the arithmetic below looks
		     questionable; confirm intent.  */
		  int regno = range & expr.X_add_number;

		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= expr.X_add_number;
	    }
	  else
	    {
	      /* Non-constant mask: record it as a relocation to be
		 resolved later; only one such expression may appear.  */
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1540
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (s0-s31).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers, with syntax extensions.  */
};
1549
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
         FIXME: This is not implemented, as it would require backtracking in
         some cases, e.g.:
           vtbl.8 d3,d4,d5
         This could be done (the meaning isn't really ambiguous), but doesn't
         fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Select the register class; MAX_REGS for D lists is filled in
     below, since it depends on the FPU variant.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  max_regs = 32;
	  /* Record that the VFPv3 feature was actually used, for the
	     build attributes.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	max_regs = 16;
    }

  /* BASE_REG tracks the lowest register seen; start it past the top.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  A Q
	 register occupies two mask bits and counts as two registers.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  /* NOTE(review): this error assignment uses gettext ()
	     directly where the rest of the file uses _() -- harmless
	     but inconsistent.  */
	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the parse returns 2 * n; bump to cover the
	     second D register of the pair.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this assumes the terminating character is '}'
     without checking it -- a list such as "{d0 d1}" silently skips a
     character here; confirm whether a missing-brace diagnostic is
     wanted.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1730
1731 /* True if two alias types are the same. */
1732
1733 static int
1734 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1735 {
1736 if (!a && !b)
1737 return 1;
1738
1739 if (!a || !b)
1740 return 0;
1741
1742 if (a->defined != b->defined)
1743 return 0;
1744
1745 if ((a->defined & NTA_HASTYPE) != 0
1746 && (a->eltype.type != b->eltype.type
1747 || a->eltype.size != b->eltype.size))
1748 return 0;
1749
1750 if ((a->defined & NTA_HASINDEX) != 0
1751 && (a->index != b->index))
1752 return 0;
1753
1754 return 1;
1755 }
1756
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X) ((X) & 0xf)
#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
                           struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  /* Braces are optional around a singleton list (see the do-while
     condition below).  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
        {
          first_error (_(reg_expected_msgs[rtype]));
          return FAIL;
        }

      if (base_reg == -1)
        {
          /* First register in the list: fixes the base, the type, and
             (for Q registers) the implicit stride.  */
          base_reg = getreg;
          if (rtype == REG_TYPE_NQ)
            {
              reg_incr = 1;
              addregs = 2;
            }
          firsttype = atype;
        }
      else if (reg_incr == -1)
        {
          /* Second register: its distance from the base fixes the
             stride for the rest of the list.  */
          reg_incr = getreg - base_reg;
          if (reg_incr < 1 || reg_incr > 2)
            {
              first_error (_(incr_error));
              return FAIL;
            }
        }
      else if (getreg != base_reg + reg_incr * count)
        {
          /* Subsequent registers must follow the established stride.  */
          first_error (_(incr_error));
          return FAIL;
        }

      if (!neon_alias_types_same (&atype, &firsttype))
        {
          first_error (_(type_error));
          return FAIL;
        }

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
         modes. */
      if (ptr[0] == '-')
        {
          struct neon_typed_alias htype;
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
          if (lane == -1)
            lane = NEON_INTERLEAVE_LANES;
          else if (lane != NEON_INTERLEAVE_LANES)
            {
              first_error (_(type_error));
              return FAIL;
            }
          if (reg_incr == -1)
            reg_incr = 1;
          else if (reg_incr != 1)
            {
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
              return FAIL;
            }
          ptr++;
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
          if (hireg == FAIL)
            {
              first_error (_(reg_expected_msgs[rtype]));
              return FAIL;
            }
          if (!neon_alias_types_same (&htype, &firsttype))
            {
              first_error (_(type_error));
              return FAIL;
            }
          /* Count every D register in the range (two per Q register).  */
          count += hireg + dregs - getreg;
          continue;
        }

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
        {
          count += 2;
          continue;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          /* All indexed elements must use the same lane.  */
          if (lane == -1)
            lane = atype.index;
          else if (lane != atype.index)
            {
              first_error (_(type_error));
              return FAIL;
            }
        }
      else if (lane == -1)
        lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
        {
          first_error (_(type_error));
          return FAIL;
        }
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check. */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the header comment.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
1923
1924 /* Parse an explicit relocation suffix on an expression. This is
1925 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1926 arm_reloc_hsh contains no entries, so this function can only
1927 succeed if there is no () after the word. Returns -1 on error,
1928 BFD_RELOC_UNUSED if there wasn't any suffix. */
1929 static int
1930 parse_reloc (char **str)
1931 {
1932 struct reloc_entry *r;
1933 char *p, *q;
1934
1935 if (**str != '(')
1936 return BFD_RELOC_UNUSED;
1937
1938 p = *str + 1;
1939 q = p;
1940
1941 while (*q && *q != ')' && *q != ',')
1942 q++;
1943 if (*q != ')')
1944 return -1;
1945
1946 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1947 return -1;
1948
1949 *str = q + 1;
1950 return r->reloc;
1951 }
1952
1953 /* Directives: register aliases. */
1954
1955 static struct reg_entry *
1956 insert_reg_alias (char *str, int number, int type)
1957 {
1958 struct reg_entry *new;
1959 const char *name;
1960
1961 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1962 {
1963 if (new->builtin)
1964 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1965
1966 /* Only warn about a redefinition if it's not defined as the
1967 same register. */
1968 else if (new->number != number || new->type != type)
1969 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1970
1971 return 0;
1972 }
1973
1974 name = xstrdup (str);
1975 new = xmalloc (sizeof (struct reg_entry));
1976
1977 new->name = name;
1978 new->number = number;
1979 new->type = type;
1980 new->builtin = FALSE;
1981 new->neon = NULL;
1982
1983 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1984 abort ();
1985
1986 return new;
1987 }
1988
1989 static void
1990 insert_neon_reg_alias (char *str, int number, int type,
1991 struct neon_typed_alias *atype)
1992 {
1993 struct reg_entry *reg = insert_reg_alias (str, number, type);
1994
1995 if (!reg)
1996 {
1997 first_error (_("attempt to redefine typed alias"));
1998 return;
1999 }
2000
2001 if (atype)
2002 {
2003 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2004 *reg->neon = *atype;
2005 }
2006 }
2007
2008 /* Look for the .req directive. This is of the form:
2009
2010 new_register_name .req existing_register_name
2011
2012 If we find one, or if it looks sufficiently like one that we want to
2013 handle any error here, return non-zero. Otherwise return zero. */
2014
2015 static int
2016 create_register_alias (char * newname, char *p)
2017 {
2018 struct reg_entry *old;
2019 char *oldname, *nbuf;
2020 size_t nlen;
2021
2022 /* The input scrubber ensures that whitespace after the mnemonic is
2023 collapsed to single spaces. */
2024 oldname = p;
2025 if (strncmp (oldname, " .req ", 6) != 0)
2026 return 0;
2027
2028 oldname += 6;
2029 if (*oldname == '\0')
2030 return 0;
2031
2032 old = hash_find (arm_reg_hsh, oldname);
2033 if (!old)
2034 {
2035 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2036 return 1;
2037 }
2038
2039 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2040 the desired alias name, and p points to its end. If not, then
2041 the desired alias name is in the global original_case_string. */
2042 #ifdef TC_CASE_SENSITIVE
2043 nlen = p - newname;
2044 #else
2045 newname = original_case_string;
2046 nlen = strlen (newname);
2047 #endif
2048
2049 nbuf = alloca (nlen + 1);
2050 memcpy (nbuf, newname, nlen);
2051 nbuf[nlen] = '\0';
2052
2053 /* Create aliases under the new name as stated; an all-lowercase
2054 version of the new name; and an all-uppercase version of the new
2055 name. */
2056 insert_reg_alias (nbuf, old->number, old->type);
2057
2058 for (p = nbuf; *p; p++)
2059 *p = TOUPPER (*p);
2060
2061 if (strncmp (nbuf, newname, nlen))
2062 insert_reg_alias (nbuf, old->number, old->type);
2063
2064 for (p = nbuf; *p; p++)
2065 *p = TOLOWER (*p);
2066
2067 if (strncmp (nbuf, newname, nlen))
2068 insert_reg_alias (nbuf, old->number, old->type);
2069
2070 return 1;
2071 }
2072
2073 /* Create a Neon typed/indexed register alias using directives, e.g.:
2074 X .dn d5.s32[1]
2075 Y .qn 6.s16
2076 Z .dn d7
2077 T .dn Z[0]
2078 These typed registers can be used instead of the types specified after the
2079 Neon mnemonic, so long as all operands given have types. Types can also be
2080 specified directly, e.g.:
2081 vadd d0.s32, d1.s32, d2.s32
2082 */
2083
2084 static int
2085 create_neon_reg_alias (char *newname, char *p)
2086 {
2087 enum arm_reg_type basetype;
2088 struct reg_entry *basereg;
2089 struct reg_entry mybasereg;
2090 struct neon_type ntype;
2091 struct neon_typed_alias typeinfo;
2092 char *namebuf, *nameend;
2093 int namelen;
2094
2095 typeinfo.defined = 0;
2096 typeinfo.eltype.type = NT_invtype;
2097 typeinfo.eltype.size = -1;
2098 typeinfo.index = -1;
2099
2100 nameend = p;
2101
2102 if (strncmp (p, " .dn ", 5) == 0)
2103 basetype = REG_TYPE_VFD;
2104 else if (strncmp (p, " .qn ", 5) == 0)
2105 basetype = REG_TYPE_NQ;
2106 else
2107 return 0;
2108
2109 p += 5;
2110
2111 if (*p == '\0')
2112 return 0;
2113
2114 basereg = arm_reg_parse_multi (&p);
2115
2116 if (basereg && basereg->type != basetype)
2117 {
2118 as_bad (_("bad type for register"));
2119 return 0;
2120 }
2121
2122 if (basereg == NULL)
2123 {
2124 expressionS exp;
2125 /* Try parsing as an integer. */
2126 my_get_expression (&exp, &p, GE_NO_PREFIX);
2127 if (exp.X_op != O_constant)
2128 {
2129 as_bad (_("expression must be constant"));
2130 return 0;
2131 }
2132 basereg = &mybasereg;
2133 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2134 : exp.X_add_number;
2135 basereg->neon = 0;
2136 }
2137
2138 if (basereg->neon)
2139 typeinfo = *basereg->neon;
2140
2141 if (parse_neon_type (&ntype, &p) == SUCCESS)
2142 {
2143 /* We got a type. */
2144 if (typeinfo.defined & NTA_HASTYPE)
2145 {
2146 as_bad (_("can't redefine the type of a register alias"));
2147 return 0;
2148 }
2149
2150 typeinfo.defined |= NTA_HASTYPE;
2151 if (ntype.elems != 1)
2152 {
2153 as_bad (_("you must specify a single type only"));
2154 return 0;
2155 }
2156 typeinfo.eltype = ntype.el[0];
2157 }
2158
2159 if (skip_past_char (&p, '[') == SUCCESS)
2160 {
2161 expressionS exp;
2162 /* We got a scalar index. */
2163
2164 if (typeinfo.defined & NTA_HASINDEX)
2165 {
2166 as_bad (_("can't redefine the index of a scalar alias"));
2167 return 0;
2168 }
2169
2170 my_get_expression (&exp, &p, GE_NO_PREFIX);
2171
2172 if (exp.X_op != O_constant)
2173 {
2174 as_bad (_("scalar index must be constant"));
2175 return 0;
2176 }
2177
2178 typeinfo.defined |= NTA_HASINDEX;
2179 typeinfo.index = exp.X_add_number;
2180
2181 if (skip_past_char (&p, ']') == FAIL)
2182 {
2183 as_bad (_("expecting ]"));
2184 return 0;
2185 }
2186 }
2187
2188 namelen = nameend - newname;
2189 namebuf = alloca (namelen + 1);
2190 strncpy (namebuf, newname, namelen);
2191 namebuf[namelen] = '\0';
2192
2193 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2194 typeinfo.defined != 0 ? &typeinfo : NULL);
2195
2196 /* Insert name in all uppercase. */
2197 for (p = namebuf; *p; p++)
2198 *p = TOUPPER (*p);
2199
2200 if (strncmp (namebuf, newname, namelen))
2201 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2202 typeinfo.defined != 0 ? &typeinfo : NULL);
2203
2204 /* Insert name in all lowercase. */
2205 for (p = namebuf; *p; p++)
2206 *p = TOLOWER (*p);
2207
2208 if (strncmp (namebuf, newname, namelen))
2209 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2210 typeinfo.defined != 0 ? &typeinfo : NULL);
2211
2212 return 1;
2213 }
2214
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Reaching this
   handler therefore always indicates a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2222
/* As for s_req: .dn belongs between the alias and the register name,
   so seeing it at the start of a statement is always an error.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2228
/* As for s_req: .qn belongs between the alias and the register name,
   so seeing it at the start of a statement is always an error.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2234
2235 /* The .unreq directive deletes an alias which was previously defined
2236 by .req. For example:
2237
2238 my_alias .req r11
2239 .unreq my_alias */
2240
2241 static void
2242 s_unreq (int a ATTRIBUTE_UNUSED)
2243 {
2244 char * name;
2245 char saved_char;
2246
2247 name = input_line_pointer;
2248
2249 while (*input_line_pointer != 0
2250 && *input_line_pointer != ' '
2251 && *input_line_pointer != '\n')
2252 ++input_line_pointer;
2253
2254 saved_char = *input_line_pointer;
2255 *input_line_pointer = 0;
2256
2257 if (!*name)
2258 as_bad (_("invalid syntax for .unreq directive"));
2259 else
2260 {
2261 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2262
2263 if (!reg)
2264 as_bad (_("unknown register alias '%s'"), name);
2265 else if (reg->builtin)
2266 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2267 name);
2268 else
2269 {
2270 hash_delete (arm_reg_hsh, name);
2271 free ((char *) reg->name);
2272 if (reg->neon)
2273 free (reg->neon);
2274 free (reg);
2275 }
2276 }
2277
2278 *input_line_pointer = saved_char;
2279 demand_empty_rest_of_line ();
2280 }
2281
2282 /* Directives: Instruction set selection. */
2283
2284 #ifdef OBJ_ELF
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
   and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */

/* Last mapping state emitted for the current section.  */
static enum mstate mapstate = MAP_UNDEFINED;

/* Emit a mapping symbol ($a, $t or $d) at the current position when
   the output switches between ARM code, Thumb code and data.  Also
   records the state in the section's tc data and sets the symbol's
   ARM/Thumb attributes for code states.  */
void
mapping_state (enum mstate state)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  mapstate = state;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    case MAP_UNDEFINED:
      return;
    default:
      abort ();
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  /* Create the local, untyped mapping symbol at the current offset.  */
  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* For code states, mark the symbol's ARM/Thumb-ness; data symbols
     need no further decoration.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      return;
    }
}
2351 #else
2352 #define mapping_state(x) /* nothing */
2353 #endif
2354
2355 /* Find the real, Thumb encoded start of a Thumb function. */
2356
2357 static symbolS *
2358 find_real_start (symbolS * symbolP)
2359 {
2360 char * real_start;
2361 const char * name = S_GET_NAME (symbolP);
2362 symbolS * new_target;
2363
2364 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2365 #define STUB_NAME ".real_start_of"
2366
2367 if (name == NULL)
2368 abort ();
2369
2370 /* The compiler may generate BL instructions to local labels because
2371 it needs to perform a branch to a far away location. These labels
2372 do not have a corresponding ".real_start_of" label. We check
2373 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2374 the ".real_start_of" convention for nonlocal branches. */
2375 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2376 return symbolP;
2377
2378 real_start = ACONCAT ((STUB_NAME, name, NULL));
2379 new_target = symbol_find (real_start);
2380
2381 if (new_target == NULL)
2382 {
2383 as_warn ("Failed to find real start of function: %s\n", name);
2384 new_target = symbolP;
2385 }
2386
2387 return new_target;
2388 }
2389
2390 static void
2391 opcode_select (int width)
2392 {
2393 switch (width)
2394 {
2395 case 16:
2396 if (! thumb_mode)
2397 {
2398 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2399 as_bad (_("selected processor does not support THUMB opcodes"));
2400
2401 thumb_mode = 1;
2402 /* No need to force the alignment, since we will have been
2403 coming from ARM mode, which is word-aligned. */
2404 record_alignment (now_seg, 1);
2405 }
2406 mapping_state (MAP_THUMB);
2407 break;
2408
2409 case 32:
2410 if (thumb_mode)
2411 {
2412 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2413 as_bad (_("selected processor does not support ARM opcodes"));
2414
2415 thumb_mode = 0;
2416
2417 if (!need_pass_2)
2418 frag_align (2, 0, 0);
2419
2420 record_alignment (now_seg, 1);
2421 }
2422 mapping_state (MAP_ARM);
2423 break;
2424
2425 default:
2426 as_bad (_("invalid instruction size selected (%d)"), width);
2427 }
2428 }
2429
/* Handle the .arm pseudo-op: switch to 32-bit ARM instruction encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2436
/* Handle the .thumb pseudo-op: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2443
2444 static void
2445 s_code (int unused ATTRIBUTE_UNUSED)
2446 {
2447 int temp;
2448
2449 temp = get_absolute_expression ();
2450 switch (temp)
2451 {
2452 case 16:
2453 case 32:
2454 opcode_select (temp);
2455 break;
2456
2457 default:
2458 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2459 }
2460 }
2461
/* Handle the .force_thumb pseudo-op: unconditionally enter Thumb mode.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* thumb_mode == 2 marks "forced" Thumb, distinct from the normal
	 value 1 set by opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2478
/* Handle the .thumb_func pseudo-op: switch to Thumb mode and flag the
   next label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2488
2489 /* Perform a .set directive, but also mark the alias as
2490 being a thumb function. */
2491
/* Handle the .thumb_set pseudo-op.  EQUIV is non-zero when the
   directive should behave like .equiv (error if the symbol is already
   defined) rather than like .set.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Restore the delimiter so the comma check below sees the raw input.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name so it prints cleanly.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Put the delimiter back before evaluating the value expression.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2578
2579 /* Directives: Mode selection. */
2580
2581 /* .syntax [unified|divided] - choose the new unified syntax
2582 (same for Arm and Thumb encoding, modulo slight differences in what
2583 can be represented) or the old divergent syntax for each mode. */
2584 static void
2585 s_syntax (int unused ATTRIBUTE_UNUSED)
2586 {
2587 char *name, delim;
2588
2589 name = input_line_pointer;
2590 delim = get_symbol_end ();
2591
2592 if (!strcasecmp (name, "unified"))
2593 unified_syntax = TRUE;
2594 else if (!strcasecmp (name, "divided"))
2595 unified_syntax = FALSE;
2596 else
2597 {
2598 as_bad (_("unrecognized syntax mode \"%s\""), name);
2599 return;
2600 }
2601 *input_line_pointer = delim;
2602 demand_empty_rest_of_line ();
2603 }
2604
2605 /* Directives: sectioning and alignment. */
2606
2607 /* Same as s_align_ptwo but align 0 => align 2. */
2608
/* Handle the .align pseudo-op.  The argument is a power of two, with
   0 treated as 2 (word alignment).  An optional second argument gives
   the fill value.  */

static void
s_align (int unused ATTRIBUTE_UNUSED)
{
  int temp;
  long temp_fill;
  long max_alignment = 15;

  temp = get_absolute_expression ();
  /* Clamp out-of-range requests rather than failing outright.  */
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
  else if (temp < 0)
    {
      as_bad (_("alignment negative. 0 assumed."));
      temp = 0;
    }

  if (*input_line_pointer == ',')
    {
      input_line_pointer++;
      temp_fill = get_absolute_expression ();
    }
  else
    temp_fill = 0;

  /* .align 0 means align to a word boundary.  */
  if (!temp)
    temp = 2;

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    frag_align (temp, (int) temp_fill, 0);
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
}
2643
/* Handle the .bss pseudo-op: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
2653
/* Handle the .even pseudo-op: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2665
2666 /* Directives: Literal pools. */
2667
2668 static literal_pool *
2669 find_literal_pool (void)
2670 {
2671 literal_pool * pool;
2672
2673 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2674 {
2675 if (pool->section == now_seg
2676 && pool->sub_section == now_subseg)
2677 break;
2678 }
2679
2680 return pool;
2681 }
2682
/* Return the literal pool for the current (sub)section, creating one
   (and giving it a fresh anchor symbol and ID) if necessary.  */

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol anchors the pool; its value is assigned later by
	 s_ltorg when the pool contents are actually emitted.  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
2720
2721 /* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */
2723
/* Add the expression in inst.reloc.exp to the current literal pool,
   reusing an existing identical entry if one is present, and rewrite
   inst.reloc.exp to reference the pool entry.  Returns SUCCESS, or
   FAIL (with inst.error set) if the pool is full.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Constants match on value and signedness.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* Symbolic expressions match on symbol, offset and op-symbol.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Redirect the instruction's relocation at the pool slot: pool
     symbol plus byte offset of the (word-sized) entry.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
2773
2774 /* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
2776
/* Give the previously created symbol SYMBOLP a name, segment, value
   and fragment, and link it into the symbol chain.  Used for symbols
   (such as literal pool anchors) created before their address is
   known.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
2824
2825
/* Handle the .ltorg pseudo-op: dump the current literal pool into the
   output at this point and mark the pool empty.  Does nothing if the
   pool for this (sub)section is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* The \002 control character keeps the name from clashing with any
     symbol the user could write.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Pin the pool's anchor symbol to the current output location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
2868
2869 #ifdef OBJ_ELF
2870 /* Forward declarations for functions below, in the MD interface
2871 section. */
2872 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2873 static valueT create_unwind_entry (int);
2874 static void start_unwind_section (const segT, int);
2875 static void add_unwind_opcode (valueT, int);
2876 static void flush_pending_unwind (void);
2877
2878 /* Directives: Data. */
2879
/* Handle .word/.short etc. for ELF: like cons(), but each expression
   may carry a relocation suffix such as (got) or (plt).  NBYTES is the
   size of each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      /* Only symbolic expressions can carry a relocation suffix.  */
      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present; emit normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  /* Splice the reloc suffix out of the input buffer so
		     the expression parses as one unit, then restore the
		     buffer from the saved copy afterwards.  */
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation at the least significant end of
		     the field when it is narrower than NBYTES.
		     NOTE(review): this offset looks big-endian-oriented;
		     confirm against md_number_to_chars conventions.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
2967
2968
2969 /* Parse a .rel31 directive. */
2970
/* Parse a .rel31 directive: a 31-bit self-relative reference with an
   explicit high bit.  Syntax: .rel31 <0|1>, <expression>.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* The first operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the low 31 bits are filled in by the
     PREL31 relocation.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3008
3009 /* Directives: AEABI stack-unwind tables. */
3010
3011 /* Parse an unwind_fnstart directive. Simply records the current location. */
3012
3013 static void
3014 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3015 {
3016 demand_empty_rest_of_line ();
3017 /* Mark the start of the function. */
3018 unwind.proc_start = expr_build_dot ();
3019
3020 /* Reset the rest of the unwind info. */
3021 unwind.opcode_count = 0;
3022 unwind.table_entry = NULL;
3023 unwind.personality_routine = NULL;
3024 unwind.personality_index = -1;
3025 unwind.frame_size = 0;
3026 unwind.fp_offset = 0;
3027 unwind.fp_reg = 13;
3028 unwind.fp_used = 0;
3029 unwind.sp_restored = 0;
3030 }
3031
3032
3033 /* Parse a handlerdata directive. Creates the exception handling table entry
3034 for the function. */
3035
3036 static void
3037 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3038 {
3039 demand_empty_rest_of_line ();
3040 if (unwind.table_entry)
3041 as_bad (_("dupicate .handlerdata directive"));
3042
3043 create_unwind_entry (1);
3044 }
3045
3046 /* Parse an unwind_fnend directive. Generates the index table entry. */
3047
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;

  demand_empty_rest_of_line ();

  /* Add eh table entry.  If .handlerdata already created one, VAL
     stays 0 and the entry is referenced via unwind.table_entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] = {
	"__aeabi_unwind_cpp_pr0",
	"__aeabi_unwind_cpp_pr1",
	"__aeabi_unwind_cpp_pr2"
      };
      /* A zero-sized BFD_RELOC_NONE fix creates the symbol reference
	 without emitting any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
}
3103
3104
3105 /* Parse an unwind_cantunwind directive. */
3106
3107 static void
3108 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3109 {
3110 demand_empty_rest_of_line ();
3111 if (unwind.personality_routine || unwind.personality_index != -1)
3112 as_bad (_("personality routine specified for cantunwind frame"));
3113
3114 unwind.personality_index = -2;
3115 }
3116
3117
3118 /* Parse a personalityindex directive. */
3119
/* Parse a personalityindex directive.  The operand selects one of the
   16 EHABI personality routine slots.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a constant in [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3142
3143
3144 /* Parse a personality directive. */
3145
/* Parse a personality directive.  The operand names the personality
   routine symbol for the current frame.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the delimiter clobbered by get_symbol_end.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3161
3162
3163 /* Parse a directive saving core registers. */
3164
/* Parse a directive saving core registers.  Encodes the register list
   as EHABI pop opcodes and updates the tracked frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      /* Replace ip (bit 12) with sp (bit 13) in the mask.  */
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3238
3239
3240 /* Parse a directive saving FPA registers. */
3241
/* Parse a directive saving FPA registers.  REG is the first register
   number; the mandatory ", <constant>" operand gives how many
   registers (1-4) are saved.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers are 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
3287
3288
3289 /* Parse a directive saving VFP registers for ARMv6 and above. */
3290
/* Parse a directive saving VFP registers for ARMv6 and above.  Splits
   the D-register list into a VFPv3 block (d16-d31) and a low block
   (d0-d15), each with its own opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* D registers are 8 bytes each.  */
  unwind.frame_size += count * 8;
}
3337
3338
3339 /* Parse a directive saving VFP registers for pre-ARMv6. */
3340
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX-style,
   hence the extra 4 bytes of frame).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* 8 bytes per D register, plus 4 for the FSTMX format word.  */
  unwind.frame_size += count * 8 + 4;
}
3372
3373
3374 /* Parse a directive saving iWMMXt data registers. */
3375
/* Parse a directive saving iWMMXt data registers.  Parses a register
   list (with optional braces and ranges) into a 16-bit mask, merges
   with any immediately preceding wr-save opcode, then emits opcodes
   for each contiguous block in descending register order.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* A set bit at or above REG means the list is out of order.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was the short form (wr10..wr10+i);
		 absorb it if our mask starts exactly at wr9.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was the two-byte long form; decode its
		 start register and count from the preceding byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
3508
/* Parse a directive saving iWMMXt control registers (wcgr0-wcgr3,
   register numbers 8-11, stored as a 4-bit mask).  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Rebase wcgr register numbers to start at bit 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
3575
3576
3577 /* Parse an unwind_save directive.
3578 If the argument is non-zero, this is a .vsave directive. */
3579
3580 static void
3581 s_arm_unwind_save (int arch_v6)
3582 {
3583 char *peek;
3584 struct reg_entry *reg;
3585 bfd_boolean had_brace = FALSE;
3586
3587 /* Figure out what sort of save we have. */
3588 peek = input_line_pointer;
3589
3590 if (*peek == '{')
3591 {
3592 had_brace = TRUE;
3593 peek++;
3594 }
3595
3596 reg = arm_reg_parse_multi (&peek);
3597
3598 if (!reg)
3599 {
3600 as_bad (_("register expected"));
3601 ignore_rest_of_line ();
3602 return;
3603 }
3604
3605 switch (reg->type)
3606 {
3607 case REG_TYPE_FN:
3608 if (had_brace)
3609 {
3610 as_bad (_("FPA .unwind_save does not take a register list"));
3611 ignore_rest_of_line ();
3612 return;
3613 }
3614 s_arm_unwind_save_fpa (reg->number);
3615 return;
3616
3617 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3618 case REG_TYPE_VFD:
3619 if (arch_v6)
3620 s_arm_unwind_save_vfp_armv6 ();
3621 else
3622 s_arm_unwind_save_vfp ();
3623 return;
3624 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3625 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3626
3627 default:
3628 as_bad (_(".unwind_save does not support this kind of register"));
3629 ignore_rest_of_line ();
3630 }
3631 }
3632
3633
3634 /* Parse an unwind_movsp directive. */
3635
3636 static void
3637 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3638 {
3639 int reg;
3640 valueT op;
3641 int offset;
3642
3643 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3644 if (reg == FAIL)
3645 {
3646 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3647 ignore_rest_of_line ();
3648 return;
3649 }
3650
3651 /* Optional constant. */
3652 if (skip_past_comma (&input_line_pointer) != FAIL)
3653 {
3654 if (immediate_for_directive (&offset) == FAIL)
3655 return;
3656 }
3657 else
3658 offset = 0;
3659
3660 demand_empty_rest_of_line ();
3661
3662 if (reg == REG_SP || reg == REG_PC)
3663 {
3664 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3665 return;
3666 }
3667
3668 if (unwind.fp_reg != REG_SP)
3669 as_bad (_("unexpected .unwind_movsp directive"));
3670
3671 /* Generate opcode to restore the value. */
3672 op = 0x90 | reg;
3673 add_unwind_opcode (op, 1);
3674
3675 /* Record the information for later. */
3676 unwind.fp_reg = reg;
3677 unwind.fp_offset = unwind.frame_size - offset;
3678 unwind.sp_restored = 1;
3679 }
3680
3681 /* Parse an unwind_pad directive. */
3682
3683 static void
3684 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3685 {
3686 int offset;
3687
3688 if (immediate_for_directive (&offset) == FAIL)
3689 return;
3690
3691 if (offset & 3)
3692 {
3693 as_bad (_("stack increment must be multiple of 4"));
3694 ignore_rest_of_line ();
3695 return;
3696 }
3697
3698 /* Don't generate any opcodes, just record the details for later. */
3699 unwind.frame_size += offset;
3700 unwind.pending_offset += offset;
3701
3702 demand_empty_rest_of_line ();
3703 }
3704
3705 /* Parse an unwind_setfp directive. */
3706
3707 static void
3708 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3709 {
3710 int sp_reg;
3711 int fp_reg;
3712 int offset;
3713
3714 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3715 if (skip_past_comma (&input_line_pointer) == FAIL)
3716 sp_reg = FAIL;
3717 else
3718 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3719
3720 if (fp_reg == FAIL || sp_reg == FAIL)
3721 {
3722 as_bad (_("expected <reg>, <reg>"));
3723 ignore_rest_of_line ();
3724 return;
3725 }
3726
3727 /* Optional constant. */
3728 if (skip_past_comma (&input_line_pointer) != FAIL)
3729 {
3730 if (immediate_for_directive (&offset) == FAIL)
3731 return;
3732 }
3733 else
3734 offset = 0;
3735
3736 demand_empty_rest_of_line ();
3737
3738 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3739 {
3740 as_bad (_("register must be either sp or set by a previous"
3741 "unwind_movsp directive"));
3742 return;
3743 }
3744
3745 /* Don't generate any opcodes, just record the information for later. */
3746 unwind.fp_reg = fp_reg;
3747 unwind.fp_used = 1;
3748 if (sp_reg == 13)
3749 unwind.fp_offset = unwind.frame_size - offset;
3750 else
3751 unwind.fp_offset -= offset;
3752 }
3753
3754 /* Parse an unwind_raw directive. */
3755
3756 static void
3757 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3758 {
3759 expressionS exp;
3760 /* This is an arbitrary limit. */
3761 unsigned char op[16];
3762 int count;
3763
3764 expression (&exp);
3765 if (exp.X_op == O_constant
3766 && skip_past_comma (&input_line_pointer) != FAIL)
3767 {
3768 unwind.frame_size += exp.X_add_number;
3769 expression (&exp);
3770 }
3771 else
3772 exp.X_op = O_illegal;
3773
3774 if (exp.X_op != O_constant)
3775 {
3776 as_bad (_("expected <offset>, <opcode>"));
3777 ignore_rest_of_line ();
3778 return;
3779 }
3780
3781 count = 0;
3782
3783 /* Parse the opcode. */
3784 for (;;)
3785 {
3786 if (count >= 16)
3787 {
3788 as_bad (_("unwind opcode too long"));
3789 ignore_rest_of_line ();
3790 }
3791 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3792 {
3793 as_bad (_("invalid unwind opcode"));
3794 ignore_rest_of_line ();
3795 return;
3796 }
3797 op[count++] = exp.X_add_number;
3798
3799 /* Parse the next byte. */
3800 if (skip_past_comma (&input_line_pointer) == FAIL)
3801 break;
3802
3803 expression (&exp);
3804 }
3805
3806 /* Add the opcode bytes in reverse order. */
3807 while (count--)
3808 add_unwind_opcode (op[count], 1);
3809
3810 demand_empty_rest_of_line ();
3811 }
3812
3813
/* Parse a .eabi_attribute directive.  Syntax:
     .eabi_attribute <tag>, <value>
   where <value> is a numeric constant, a double-quoted string, or
   (for Tag_compatibility) a constant followed by a string.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  bfd_boolean is_string;
  int tag;
  unsigned int i = 0;
  char *s = NULL;
  char saved_char;

  expression (& exp);
  if (exp.X_op != O_constant)
    goto bad;

  tag = exp.X_add_number;
  /* Tags 4, 5, 32 and odd-numbered tags above 32 take string values;
     everything else is numeric.  */
  if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
    is_string = 1;
  else
    is_string = 0;

  if (skip_past_comma (&input_line_pointer) == FAIL)
    goto bad;
  /* Tag 32 (Tag_compatibility) takes an integer flag before its
     string, so read the numeric part for it as well.  */
  if (tag == 32 || !is_string)
    {
      expression (& exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expected numeric constant"));
	  ignore_rest_of_line ();
	  return;
	}
      i = exp.X_add_number;
    }
  if (tag == Tag_compatibility
      && skip_past_comma (&input_line_pointer) == FAIL)
    {
      as_bad (_("expected comma"));
      ignore_rest_of_line ();
      return;
    }
  if (is_string)
    {
      /* Scan the quoted string in place, NUL-terminating it
	 temporarily; the overwritten character is restored below.  */
      skip_whitespace(input_line_pointer);
      if (*input_line_pointer != '"')
	goto bad_string;
      input_line_pointer++;
      s = input_line_pointer;
      while (*input_line_pointer && *input_line_pointer != '"')
	input_line_pointer++;
      if (*input_line_pointer != '"')
	goto bad_string;
      saved_char = *input_line_pointer;
      *input_line_pointer = 0;
    }
  else
    {
      s = NULL;
      saved_char = 0;
    }

  if (tag == Tag_compatibility)
    elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
  else if (is_string)
    elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
  else
    elf32_arm_add_eabi_attr_int (stdoutput, tag, i);

  if (s)
    {
      /* Undo the temporary NUL-termination and step past the closing
	 quote.  */
      *input_line_pointer = saved_char;
      input_line_pointer++;
    }
  demand_empty_rest_of_line ();
  return;
bad_string:
  as_bad (_("bad string constant"));
  ignore_rest_of_line ();
  return;
bad:
  as_bad (_("expected <tag> , <value>"));
  ignore_rest_of_line ();
}
3898 #endif /* OBJ_ELF */
3899
3900 static void s_arm_arch (int);
3901 static void s_arm_object_arch (int);
3902 static void s_arm_cpu (int);
3903 static void s_arm_fpu (int);
3904
3905 #ifdef TE_PE
3906
3907 static void
3908 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3909 {
3910 expressionS exp;
3911
3912 do
3913 {
3914 expression (&exp);
3915 if (exp.X_op == O_symbol)
3916 exp.X_op = O_secrel;
3917
3918 emit_expr (&exp, 4);
3919 }
3920 while (*input_line_pointer++ == ',');
3921
3922 input_line_pointer--;
3923 demand_empty_rest_of_line ();
3924 }
3925 #endif /* TE_PE */
3926
3927 /* This table describes all the machine specific pseudo-ops the assembler
3928 has to support. The fields are:
3929 pseudo-op name without dot
3930 function to call to execute this pseudo-op
3931 Integer arg to pass to the function. */
3932
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* ARM EHABI stack-unwinding directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  /* .vsave shares the .save handler; the non-zero argument selects
     the ARMv6 VFP encoding.  */
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf. */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2. */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
3994 \f
3995 /* Parser functions used exclusively in instruction operands. */
3996
3997 /* Generic immediate-value read function for use in insn parsing.
3998 STR points to the beginning of the immediate (the leading #);
3999 VAL receives the value; if the value is outside [MIN, MAX]
4000 issue an error. PREFIX_OPT is true if the immediate prefix is
4001 optional. */
4002
4003 static int
4004 parse_immediate (char **str, int *val, int min, int max,
4005 bfd_boolean prefix_opt)
4006 {
4007 expressionS exp;
4008 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4009 if (exp.X_op != O_constant)
4010 {
4011 inst.error = _("constant expression required");
4012 return FAIL;
4013 }
4014
4015 if (exp.X_add_number < min || exp.X_add_number > max)
4016 {
4017 inst.error = _("immediate value out of range");
4018 return FAIL;
4019 }
4020
4021 *val = exp.X_add_number;
4022 return SUCCESS;
4023 }
4024
4025 /* Less-generic immediate-value read function with the possibility of loading a
4026 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4027 instructions. Puts the result directly in inst.operands[i]. */
4028
static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      /* Low 32 bits always go in .imm.  */
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  /* For O_big, X_add_number is the number of littlenums used; accept
     anything wider than 32 bits up to 64 bits.  */
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  /* Only consume the input on success.  */
  *str = ptr;

  return SUCCESS;
}
4076
4077 /* Returns the pseudo-register number of an FPA immediate constant,
4078 or FAIL if there isn't a valid constant here. */
4079
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* Matching constants are returned as pseudo-register
	       numbers 8 + index.  */
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare against each of the supported constant values.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  /* Temporarily redirect the global input pointer so expression ()
     reads from *STR; it is restored before every return below.  */
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4167
4168 /* Returns 1 if a number has "quarter-precision" float format
4169 0baBbbbbbc defgh000 00000000 00000000. */
4170
static int
is_quarter_float (unsigned imm)
{
  /* Exponent bits 30:25 must be either 0b100000 (top bit set, rest
     clear) or 0b011111 (top bit clear, rest set); the sign bit is
     ignored and the low 19 mantissa bits must all be zero.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
4177
4178 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4179 0baBbbbbbc defgh000 00000000 00000000.
4180 The minus-zero case needs special handling, since it can't be encoded in the
4181 "quarter-precision" float format, but can nonetheless be loaded as an integer
4182 constant. */
4183
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

  /* An optional '#' immediate prefix is permitted.  */
  skip_past_char (&str, '#');

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept an exact "quarter-precision" value, or -0.0
	 (0x80000000), which is only loadable as an integer bit
	 pattern.  */
      if (is_quarter_float (fpword) || fpword == 0x80000000)
	*immed = fpword;
      else
	return FAIL;

      /* Only consume the input on success.  */
      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4216
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic (as found in arm_shift_hsh) to its kind.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  Restricts which shift forms are
   acceptable for the operand being parsed.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
4238
4239 /* Parse a <shift> specifier on an ARM data processing instruction.
4240 This has three forms:
4241
4242 (LSL|LSR|ASL|ASR|ROR) Rs
4243 (LSL|LSR|ASL|ASR|ROR) #imm
4244 RRX
4245
4246 Note that ASL is assimilated to LSL in the instruction encoding, and
4247 RRX to ROR #0 (which cannot be written as such). */
4248
4249 static int
4250 parse_shift (char **str, int i, enum parse_shift_mode mode)
4251 {
4252 const struct asm_shift_name *shift_name;
4253 enum shift_kind shift;
4254 char *s = *str;
4255 char *p = s;
4256 int reg;
4257
4258 for (p = *str; ISALPHA (*p); p++)
4259 ;
4260
4261 if (p == *str)
4262 {
4263 inst.error = _("shift expression expected");
4264 return FAIL;
4265 }
4266
4267 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4268
4269 if (shift_name == NULL)
4270 {
4271 inst.error = _("shift expression expected");
4272 return FAIL;
4273 }
4274
4275 shift = shift_name->kind;
4276
4277 switch (mode)
4278 {
4279 case NO_SHIFT_RESTRICT:
4280 case SHIFT_IMMEDIATE: break;
4281
4282 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4283 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4284 {
4285 inst.error = _("'LSL' or 'ASR' required");
4286 return FAIL;
4287 }
4288 break;
4289
4290 case SHIFT_LSL_IMMEDIATE:
4291 if (shift != SHIFT_LSL)
4292 {
4293 inst.error = _("'LSL' required");
4294 return FAIL;
4295 }
4296 break;
4297
4298 case SHIFT_ASR_IMMEDIATE:
4299 if (shift != SHIFT_ASR)
4300 {
4301 inst.error = _("'ASR' required");
4302 return FAIL;
4303 }
4304 break;
4305
4306 default: abort ();
4307 }
4308
4309 if (shift != SHIFT_RRX)
4310 {
4311 /* Whitespace can appear here if the next thing is a bare digit. */
4312 skip_whitespace (p);
4313
4314 if (mode == NO_SHIFT_RESTRICT
4315 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4316 {
4317 inst.operands[i].imm = reg;
4318 inst.operands[i].immisreg = 1;
4319 }
4320 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4321 return FAIL;
4322 }
4323 inst.operands[i].shift_kind = shift;
4324 inst.operands[i].shifted = 1;
4325 *str = p;
4326 return SUCCESS;
4327 }
4328
4329 /* Parse a <shifter_operand> for an ARM data processing instruction:
4330
4331 #<immediate>
4332 #<immediate>, <rotate>
4333 <Rm>
4334 <Rm>, <shift>
4335
4336 where <shift> is defined by parse_shift above, and <rotate> is a
4337 multiple of 2 between 0 and 30. Validation of immediate operands
4338 is deferred to md_apply_fix. */
4339
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: must be an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30] and the base
	 value an 8-bit constant, matching the ARM immediate field.  */
      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 NOTE(review): the left shift by (32 - value) assumes
	 X_add_number is wider than 32 bits when value is 0 — true for
	 the usual 64-bit offsetT; confirm for 32-bit-only hosts.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4399
4400 /* Group relocation information. Each entry in the table contains the
4401 textual name of the relocation as may appear in assembler source
4402 and must end with a colon.
4403 Along with this textual name are the relocation codes to be used if
4404 the corresponding instruction is an ALU instruction (ADD or SUB only),
4405 an LDR, an LDRS, or an LDC. */
4406
struct group_reloc_table_entry
{
  const char *name;	/* Textual name, without the trailing colon.  */
  int alu_code;		/* Relocation for ADD/SUB instructions.  */
  int ldr_code;		/* Relocation for LDR instructions.  */
  int ldrs_code;	/* Relocation for LDRS instructions.  */
  int ldc_code;		/* Relocation for LDC instructions.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
4424
/* A zero relocation code below means that the group relocation is not
   permitted for that instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
4478
4479 /* Given the address of a pointer pointing to the textual name of a group
4480 relocation as may appear in assembler source, attempt to find its details
4481 in group_reloc_table. The pointer will be updated to the character after
4482 the trailing colon. On failure, FAIL will be returned; SUCCESS
4483 otherwise. On success, *entry will be updated to point at the relevant
4484 group_reloc_table entry. */
4485
4486 static int
4487 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4488 {
4489 unsigned int i;
4490 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4491 {
4492 int length = strlen (group_reloc_table[i].name);
4493
4494 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4495 (*str)[length] == ':')
4496 {
4497 *out = &group_reloc_table[i];
4498 *str += (length + 1);
4499 return SUCCESS;
4500 }
4501 }
4502
4503 return FAIL;
4504 }
4505
4506 /* Parse a <shifter_operand> for an ARM data processing instruction
4507 (as for parse_shifter_operand) where group relocations are allowed:
4508
4509 #<immediate>
4510 #<immediate>, <rotate>
4511 #:<group_reloc>:<expression>
4512 <Rm>
4513 <Rm>, <shift>
4514
4515 where <group_reloc> is one of the strings defined in group_reloc_table.
4516 The hashes are optional.
4517
4518 Everything else is as for parse_shifter_operand. */
4519
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the #: or : prefix.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
4561
4562 /* Parse all forms of an ARM address expression. Information is written
4563 to inst.operands[i] and/or inst.reloc.
4564
4565 Preindexed addressing (.preind=1):
4566
4567 [Rn, #offset] .reg=Rn .reloc.exp=offset
4568 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4569 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4570 .shift_kind=shift .reloc.exp=shift_imm
4571
4572 These three may have a trailing ! which causes .writeback to be set also.
4573
4574 Postindexed addressing (.postind=1, .writeback=1):
4575
4576 [Rn], #offset .reg=Rn .reloc.exp=offset
4577 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4578 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4579 .shift_kind=shift .reloc.exp=shift_imm
4580
4581 Unindexed addressing (.preind=0, .postind=0):
4582
4583 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4584
4585 Other:
4586
4587 [Rn]{!} shorthand for [Rn,#0]{!}
4588 =immediate .isreg=0 .reloc.exp=immediate
4589 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4590
4591 It is the caller's responsibility to check for addressing modes not
4592 supported by the instruction, and to set inst.reloc.type. */
4593
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* Not '[': either '=immediate' (load-constant pseudo op) or a bare
     label (PC-relative).  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* The base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed part: [Rn, ...].  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm {, shift}].  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* [Rn, :align] — Neon-style alignment specifier.  */
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change. */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes. */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* [Rn, #offset] or a group relocation.  */
	  if (inst.operands[i].negative)
	    {
	      /* The '-' we consumed belonged to a negative immediate,
		 not a register: back up so the expression parser sees
		 it.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations &&
	      ((*p == '#' && *(p + 1) == ':') || *p == ':'))

	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence. */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation. Anything else is an
		 error. */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression. */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  GROUP_TYPE selects which
		 column of the table applies to this instruction.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = entry->ldc_code;
		    break;

		  default:
		    assert (0);
		}

	      /* A zero table entry means this relocation variant does
		 not exist for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* Post-indexed part: [Rn], ...  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  /* As above: the '-' belonged to the immediate.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
4809
4810 static int
4811 parse_address (char **str, int i)
4812 {
4813 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4814 ? SUCCESS : FAIL;
4815 }
4816
4817 static parse_operand_result
4818 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4819 {
4820 return parse_address_main (str, i, 1, type);
4821 }
4822
4823 /* Parse an operand for a MOVW or MOVT instruction. */
4824 static int
4825 parse_half (char **str)
4826 {
4827 char * p;
4828
4829 p = *str;
4830 skip_past_char (&p, '#');
4831 if (strncasecmp (p, ":lower16:", 9) == 0)
4832 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4833 else if (strncasecmp (p, ":upper16:", 9) == 0)
4834 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4835
4836 if (inst.reloc.type != BFD_RELOC_UNUSED)
4837 {
4838 p += 9;
4839 skip_whitespace(p);
4840 }
4841
4842 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4843 return FAIL;
4844
4845 if (inst.reloc.type == BFD_RELOC_UNUSED)
4846 {
4847 if (inst.reloc.exp.X_op != O_constant)
4848 {
4849 inst.error = _("constant expression expected");
4850 return FAIL;
4851 }
4852 if (inst.reloc.exp.X_add_number < 0
4853 || inst.reloc.exp.X_add_number > 0xffff)
4854 {
4855 inst.error = _("immediate value out of range");
4856 return FAIL;
4857 }
4858 }
4859 *str = p;
4860 return SUCCESS;
4861 }
4862
4863 /* Miscellaneous. */
4864
4865 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4866 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4867 static int
4868 parse_psr (char **str)
4869 {
4870 char *p;
4871 unsigned long psr_field;
4872 const struct asm_psr *psr;
4873 char *start;
4874
4875 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4876 feature for ease of use and backwards compatibility. */
4877 p = *str;
4878 if (strncasecmp (p, "SPSR", 4) == 0)
4879 psr_field = SPSR_BIT;
4880 else if (strncasecmp (p, "CPSR", 4) == 0)
4881 psr_field = 0;
4882 else
4883 {
4884 start = p;
4885 do
4886 p++;
4887 while (ISALNUM (*p) || *p == '_');
4888
4889 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4890 if (!psr)
4891 return FAIL;
4892
4893 *str = p;
4894 return psr->field;
4895 }
4896
4897 p += 4;
4898 if (*p == '_')
4899 {
4900 /* A suffix follows. */
4901 p++;
4902 start = p;
4903
4904 do
4905 p++;
4906 while (ISALNUM (*p) || *p == '_');
4907
4908 psr = hash_find_n (arm_psr_hsh, start, p - start);
4909 if (!psr)
4910 goto error;
4911
4912 psr_field |= psr->field;
4913 }
4914 else
4915 {
4916 if (ISALNUM (*p))
4917 goto error; /* Garbage after "[CS]PSR". */
4918
4919 psr_field |= (PSR_c | PSR_f);
4920 }
4921 *str = p;
4922 return psr_field;
4923
4924 error:
4925 inst.error = _("flag for {c}psr instruction expected");
4926 return FAIL;
4927 }
4928
4929 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4930 value suitable for splatting into the AIF field of the instruction. */
4931
4932 static int
4933 parse_cps_flags (char **str)
4934 {
4935 int val = 0;
4936 int saw_a_flag = 0;
4937 char *s = *str;
4938
4939 for (;;)
4940 switch (*s++)
4941 {
4942 case '\0': case ',':
4943 goto done;
4944
4945 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4946 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4947 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4948
4949 default:
4950 inst.error = _("unrecognized CPS flag");
4951 return FAIL;
4952 }
4953
4954 done:
4955 if (saw_a_flag == 0)
4956 {
4957 inst.error = _("missing CPS flags");
4958 return FAIL;
4959 }
4960
4961 *str = s - 1;
4962 return val;
4963 }
4964
4965 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4966 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4967
4968 static int
4969 parse_endian_specifier (char **str)
4970 {
4971 int little_endian;
4972 char *s = *str;
4973
4974 if (strncasecmp (s, "BE", 2))
4975 little_endian = 0;
4976 else if (strncasecmp (s, "LE", 2))
4977 little_endian = 1;
4978 else
4979 {
4980 inst.error = _("valid endian specifiers are be or le");
4981 return FAIL;
4982 }
4983
4984 if (ISALNUM (s[2]) || s[2] == '_')
4985 {
4986 inst.error = _("valid endian specifiers are be or le");
4987 return FAIL;
4988 }
4989
4990 *str = s + 2;
4991 return little_endian;
4992 }
4993
4994 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4995 value suitable for poking into the rotate field of an sxt or sxta
4996 instruction, or FAIL on error. */
4997
4998 static int
4999 parse_ror (char **str)
5000 {
5001 int rot;
5002 char *s = *str;
5003
5004 if (strncasecmp (s, "ROR", 3) == 0)
5005 s += 3;
5006 else
5007 {
5008 inst.error = _("missing rotation field after comma");
5009 return FAIL;
5010 }
5011
5012 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5013 return FAIL;
5014
5015 switch (rot)
5016 {
5017 case 0: *str = s; return 0x0;
5018 case 8: *str = s; return 0x1;
5019 case 16: *str = s; return 0x2;
5020 case 24: *str = s; return 0x3;
5021
5022 default:
5023 inst.error = _("rotation can only be 0, 8, 16, or 24");
5024 return FAIL;
5025 }
5026 }
5027
5028 /* Parse a conditional code (from conds[] below). The value returned is in the
5029 range 0 .. 14, or FAIL. */
5030 static int
5031 parse_cond (char **str)
5032 {
5033 char *p, *q;
5034 const struct asm_cond *c;
5035
5036 p = q = *str;
5037 while (ISALPHA (*q))
5038 q++;
5039
5040 c = hash_find_n (arm_cond_hsh, p, q - p);
5041 if (!c)
5042 {
5043 inst.error = _("condition required");
5044 return FAIL;
5045 }
5046
5047 *str = q;
5048 return c->value;
5049 }
5050
5051 /* Parse an option for a barrier instruction. Returns the encoding for the
5052 option, or FAIL. */
5053 static int
5054 parse_barrier (char **str)
5055 {
5056 char *p, *q;
5057 const struct asm_barrier_opt *o;
5058
5059 p = q = *str;
5060 while (ISALPHA (*q))
5061 q++;
5062
5063 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5064 if (!o)
5065 return FAIL;
5066
5067 *str = q;
5068 return o->value;
5069 }
5070
5071 /* Parse the operands of a table branch instruction. Similar to a memory
5072 operand. */
5073 static int
5074 parse_tb (char **str)
5075 {
5076 char * p = *str;
5077 int reg;
5078
5079 if (skip_past_char (&p, '[') == FAIL)
5080 {
5081 inst.error = _("'[' expected");
5082 return FAIL;
5083 }
5084
5085 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5086 {
5087 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5088 return FAIL;
5089 }
5090 inst.operands[0].reg = reg;
5091
5092 if (skip_past_comma (&p) == FAIL)
5093 {
5094 inst.error = _("',' expected");
5095 return FAIL;
5096 }
5097
5098 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5099 {
5100 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5101 return FAIL;
5102 }
5103 inst.operands[0].imm = reg;
5104
5105 if (skip_past_comma (&p) == SUCCESS)
5106 {
5107 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5108 return FAIL;
5109 if (inst.reloc.exp.X_add_number != 1)
5110 {
5111 inst.error = _("invalid shift");
5112 return FAIL;
5113 }
5114 inst.operands[0].shifted = 1;
5115 }
5116
5117 if (skip_past_char (&p, ']') == FAIL)
5118 {
5119 inst.error = _("']' expected");
5120 return FAIL;
5121 }
5122 *str = p;
5123 return SUCCESS;
5124 }
5125
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The "Case N" comments below refer to the operand-combination cases
   dispatched by do_neon_mov.  Note that the operand index I is advanced
   (i++) as operands are recorded, and the final index is written back
   through WHICH_OPERAND only on the success path.  On failure, *STR and
   *WHICH_OPERAND are left unchanged; the error message is recorded via
   first_error.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The three top-level branches are distinguished by the type of the
     FIRST operand: a Neon scalar, a vector register, or an ARM core
     register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      /* The second operand decides which case we are in: core register,
	 float immediate, big immediate, or another vector register.  */
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register must
		 follow (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two more core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5347
/* Matcher codes for parse_operands.

   NOTE: the order of the enumerators is significant.  parse_operands
   treats any code >= OP_FIRST_OPTIONAL as an optional operand, so all
   OP_o* codes must stay grouped at the end and OP_FIRST_OPTIONAL must
   alias the first of them.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  /* "Either-or" codes: a register of the named kind, or an alternative
     form (immediate, scalar, ...).  */
  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  All codes from here on are treated as optional
     by parse_operands (see OP_FIRST_OPTIONAL below).  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
5467
5468 /* Generic instruction operand parser. This does no encoding and no
5469 semantic validation; it merely squirrels values away in the inst
5470 structure. Returns SUCCESS or FAIL depending on whether the
5471 specified grammar matched. */
5472 static int
5473 parse_operands (char *str, const unsigned char *pattern)
5474 {
5475 unsigned const char *upat = pattern;
5476 char *backtrack_pos = 0;
5477 const char *backtrack_error = 0;
5478 int i, val, backtrack_index = 0;
5479 enum arm_reg_type rtype;
5480 parse_operand_result result;
5481
5482 #define po_char_or_fail(chr) do { \
5483 if (skip_past_char (&str, chr) == FAIL) \
5484 goto bad_args; \
5485 } while (0)
5486
5487 #define po_reg_or_fail(regtype) do { \
5488 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5489 &inst.operands[i].vectype); \
5490 if (val == FAIL) \
5491 { \
5492 first_error (_(reg_expected_msgs[regtype])); \
5493 goto failure; \
5494 } \
5495 inst.operands[i].reg = val; \
5496 inst.operands[i].isreg = 1; \
5497 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5498 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5499 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5500 || rtype == REG_TYPE_VFD \
5501 || rtype == REG_TYPE_NQ); \
5502 } while (0)
5503
5504 #define po_reg_or_goto(regtype, label) do { \
5505 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5506 &inst.operands[i].vectype); \
5507 if (val == FAIL) \
5508 goto label; \
5509 \
5510 inst.operands[i].reg = val; \
5511 inst.operands[i].isreg = 1; \
5512 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5513 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5514 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5515 || rtype == REG_TYPE_VFD \
5516 || rtype == REG_TYPE_NQ); \
5517 } while (0)
5518
5519 #define po_imm_or_fail(min, max, popt) do { \
5520 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5521 goto failure; \
5522 inst.operands[i].imm = val; \
5523 } while (0)
5524
5525 #define po_scalar_or_goto(elsz, label) do { \
5526 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5527 if (val == FAIL) \
5528 goto label; \
5529 inst.operands[i].reg = val; \
5530 inst.operands[i].isscalar = 1; \
5531 } while (0)
5532
5533 #define po_misc_or_fail(expr) do { \
5534 if (expr) \
5535 goto failure; \
5536 } while (0)
5537
5538 #define po_misc_or_fail_no_backtrack(expr) do { \
5539 result = expr; \
5540 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5541 backtrack_pos = 0; \
5542 if (result != PARSE_OPERAND_SUCCESS) \
5543 goto failure; \
5544 } while (0)
5545
5546 skip_whitespace (str);
5547
5548 for (i = 0; upat[i] != OP_stop; i++)
5549 {
5550 if (upat[i] >= OP_FIRST_OPTIONAL)
5551 {
5552 /* Remember where we are in case we need to backtrack. */
5553 assert (!backtrack_pos);
5554 backtrack_pos = str;
5555 backtrack_error = inst.error;
5556 backtrack_index = i;
5557 }
5558
5559 if (i > 0)
5560 po_char_or_fail (',');
5561
5562 switch (upat[i])
5563 {
5564 /* Registers */
5565 case OP_oRRnpc:
5566 case OP_RRnpc:
5567 case OP_oRR:
5568 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5569 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5570 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5571 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5572 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5573 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5574 case OP_oRND:
5575 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5576 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5577 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5578 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5579 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5580 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5581 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5582 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5583 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5584 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5585 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5586 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5587 case OP_oRNQ:
5588 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5589 case OP_oRNDQ:
5590 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5591 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5592 case OP_oRNSDQ:
5593 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5594
5595 /* Neon scalar. Using an element size of 8 means that some invalid
5596 scalars are accepted here, so deal with those in later code. */
5597 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5598
5599 /* WARNING: We can expand to two operands here. This has the potential
5600 to totally confuse the backtracking mechanism! It will be OK at
5601 least as long as we don't try to use optional args as well,
5602 though. */
5603 case OP_NILO:
5604 {
5605 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5606 inst.operands[i].present = 1;
5607 i++;
5608 skip_past_comma (&str);
5609 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5610 break;
5611 one_reg_only:
5612 /* Optional register operand was omitted. Unfortunately, it's in
5613 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5614 here (this is a bit grotty). */
5615 inst.operands[i] = inst.operands[i-1];
5616 inst.operands[i-1].present = 0;
5617 break;
5618 try_imm:
5619 /* There's a possibility of getting a 64-bit immediate here, so
5620 we need special handling. */
5621 if (parse_big_immediate (&str, i) == FAIL)
5622 {
5623 inst.error = _("immediate value is out of range");
5624 goto failure;
5625 }
5626 }
5627 break;
5628
5629 case OP_RNDQ_I0:
5630 {
5631 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5632 break;
5633 try_imm0:
5634 po_imm_or_fail (0, 0, TRUE);
5635 }
5636 break;
5637
5638 case OP_RVSD_I0:
5639 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5640 break;
5641
5642 case OP_RR_RNSC:
5643 {
5644 po_scalar_or_goto (8, try_rr);
5645 break;
5646 try_rr:
5647 po_reg_or_fail (REG_TYPE_RN);
5648 }
5649 break;
5650
5651 case OP_RNSDQ_RNSC:
5652 {
5653 po_scalar_or_goto (8, try_nsdq);
5654 break;
5655 try_nsdq:
5656 po_reg_or_fail (REG_TYPE_NSDQ);
5657 }
5658 break;
5659
5660 case OP_RNDQ_RNSC:
5661 {
5662 po_scalar_or_goto (8, try_ndq);
5663 break;
5664 try_ndq:
5665 po_reg_or_fail (REG_TYPE_NDQ);
5666 }
5667 break;
5668
5669 case OP_RND_RNSC:
5670 {
5671 po_scalar_or_goto (8, try_vfd);
5672 break;
5673 try_vfd:
5674 po_reg_or_fail (REG_TYPE_VFD);
5675 }
5676 break;
5677
5678 case OP_VMOV:
5679 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5680 not careful then bad things might happen. */
5681 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5682 break;
5683
5684 case OP_RNDQ_IMVNb:
5685 {
5686 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5687 break;
5688 try_mvnimm:
5689 /* There's a possibility of getting a 64-bit immediate here, so
5690 we need special handling. */
5691 if (parse_big_immediate (&str, i) == FAIL)
5692 {
5693 inst.error = _("immediate value is out of range");
5694 goto failure;
5695 }
5696 }
5697 break;
5698
5699 case OP_RNDQ_I63b:
5700 {
5701 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5702 break;
5703 try_shimm:
5704 po_imm_or_fail (0, 63, TRUE);
5705 }
5706 break;
5707
5708 case OP_RRnpcb:
5709 po_char_or_fail ('[');
5710 po_reg_or_fail (REG_TYPE_RN);
5711 po_char_or_fail (']');
5712 break;
5713
5714 case OP_RRw:
5715 po_reg_or_fail (REG_TYPE_RN);
5716 if (skip_past_char (&str, '!') == SUCCESS)
5717 inst.operands[i].writeback = 1;
5718 break;
5719
5720 /* Immediates */
5721 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5722 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5723 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5724 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5725 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5726 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5727 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5728 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5729 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5730 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5731 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5732 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5733
5734 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5735 case OP_oI7b:
5736 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5737 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5738 case OP_oI31b:
5739 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5740 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5741 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5742
5743 /* Immediate variants */
5744 case OP_oI255c:
5745 po_char_or_fail ('{');
5746 po_imm_or_fail (0, 255, TRUE);
5747 po_char_or_fail ('}');
5748 break;
5749
5750 case OP_I31w:
5751 /* The expression parser chokes on a trailing !, so we have
5752 to find it first and zap it. */
5753 {
5754 char *s = str;
5755 while (*s && *s != ',')
5756 s++;
5757 if (s[-1] == '!')
5758 {
5759 s[-1] = '\0';
5760 inst.operands[i].writeback = 1;
5761 }
5762 po_imm_or_fail (0, 31, TRUE);
5763 if (str == s - 1)
5764 str = s;
5765 }
5766 break;
5767
5768 /* Expressions */
5769 case OP_EXPi: EXPi:
5770 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5771 GE_OPT_PREFIX));
5772 break;
5773
5774 case OP_EXP:
5775 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5776 GE_NO_PREFIX));
5777 break;
5778
5779 case OP_EXPr: EXPr:
5780 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5781 GE_NO_PREFIX));
5782 if (inst.reloc.exp.X_op == O_symbol)
5783 {
5784 val = parse_reloc (&str);
5785 if (val == -1)
5786 {
5787 inst.error = _("unrecognized relocation suffix");
5788 goto failure;
5789 }
5790 else if (val != BFD_RELOC_UNUSED)
5791 {
5792 inst.operands[i].imm = val;
5793 inst.operands[i].hasreloc = 1;
5794 }
5795 }
5796 break;
5797
5798 /* Operand for MOVW or MOVT. */
5799 case OP_HALF:
5800 po_misc_or_fail (parse_half (&str));
5801 break;
5802
5803 /* Register or expression */
5804 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5805 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5806
5807 /* Register or immediate */
5808 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5809 I0: po_imm_or_fail (0, 0, FALSE); break;
5810
5811 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5812 IF:
5813 if (!is_immediate_prefix (*str))
5814 goto bad_args;
5815 str++;
5816 val = parse_fpa_immediate (&str);
5817 if (val == FAIL)
5818 goto failure;
5819 /* FPA immediates are encoded as registers 8-15.
5820 parse_fpa_immediate has already applied the offset. */
5821 inst.operands[i].reg = val;
5822 inst.operands[i].isreg = 1;
5823 break;
5824
5825 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5826 I32z: po_imm_or_fail (0, 32, FALSE); break;
5827
5828 /* Two kinds of register */
5829 case OP_RIWR_RIWC:
5830 {
5831 struct reg_entry *rege = arm_reg_parse_multi (&str);
5832 if (!rege
5833 || (rege->type != REG_TYPE_MMXWR
5834 && rege->type != REG_TYPE_MMXWC
5835 && rege->type != REG_TYPE_MMXWCG))
5836 {
5837 inst.error = _("iWMMXt data or control register expected");
5838 goto failure;
5839 }
5840 inst.operands[i].reg = rege->number;
5841 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5842 }
5843 break;
5844
5845 case OP_RIWC_RIWG:
5846 {
5847 struct reg_entry *rege = arm_reg_parse_multi (&str);
5848 if (!rege
5849 || (rege->type != REG_TYPE_MMXWC
5850 && rege->type != REG_TYPE_MMXWCG))
5851 {
5852 inst.error = _("iWMMXt control register expected");
5853 goto failure;
5854 }
5855 inst.operands[i].reg = rege->number;
5856 inst.operands[i].isreg = 1;
5857 }
5858 break;
5859
5860 /* Misc */
5861 case OP_CPSF: val = parse_cps_flags (&str); break;
5862 case OP_ENDI: val = parse_endian_specifier (&str); break;
5863 case OP_oROR: val = parse_ror (&str); break;
5864 case OP_PSR: val = parse_psr (&str); break;
5865 case OP_COND: val = parse_cond (&str); break;
5866 case OP_oBARRIER:val = parse_barrier (&str); break;
5867
5868 case OP_RVC_PSR:
5869 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5870 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5871 break;
5872 try_psr:
5873 val = parse_psr (&str);
5874 break;
5875
5876 case OP_APSR_RR:
5877 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5878 break;
5879 try_apsr:
5880 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5881 instruction). */
5882 if (strncasecmp (str, "APSR_", 5) == 0)
5883 {
5884 unsigned found = 0;
5885 str += 5;
5886 while (found < 15)
5887 switch (*str++)
5888 {
5889 case 'c': found = (found & 1) ? 16 : found | 1; break;
5890 case 'n': found = (found & 2) ? 16 : found | 2; break;
5891 case 'z': found = (found & 4) ? 16 : found | 4; break;
5892 case 'v': found = (found & 8) ? 16 : found | 8; break;
5893 default: found = 16;
5894 }
5895 if (found != 15)
5896 goto failure;
5897 inst.operands[i].isvec = 1;
5898 }
5899 else
5900 goto failure;
5901 break;
5902
5903 case OP_TB:
5904 po_misc_or_fail (parse_tb (&str));
5905 break;
5906
5907 /* Register lists */
5908 case OP_REGLST:
5909 val = parse_reg_list (&str);
5910 if (*str == '^')
5911 {
5912 inst.operands[1].writeback = 1;
5913 str++;
5914 }
5915 break;
5916
5917 case OP_VRSLST:
5918 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5919 break;
5920
5921 case OP_VRDLST:
5922 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5923 break;
5924
5925 case OP_VRSDLST:
5926 /* Allow Q registers too. */
5927 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5928 REGLIST_NEON_D);
5929 if (val == FAIL)
5930 {
5931 inst.error = NULL;
5932 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5933 REGLIST_VFP_S);
5934 inst.operands[i].issingle = 1;
5935 }
5936 break;
5937
5938 case OP_NRDLST:
5939 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5940 REGLIST_NEON_D);
5941 break;
5942
5943 case OP_NSTRLST:
5944 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5945 &inst.operands[i].vectype);
5946 break;
5947
5948 /* Addressing modes */
5949 case OP_ADDR:
5950 po_misc_or_fail (parse_address (&str, i));
5951 break;
5952
5953 case OP_ADDRGLDR:
5954 po_misc_or_fail_no_backtrack (
5955 parse_address_group_reloc (&str, i, GROUP_LDR));
5956 break;
5957
5958 case OP_ADDRGLDRS:
5959 po_misc_or_fail_no_backtrack (
5960 parse_address_group_reloc (&str, i, GROUP_LDRS));
5961 break;
5962
5963 case OP_ADDRGLDC:
5964 po_misc_or_fail_no_backtrack (
5965 parse_address_group_reloc (&str, i, GROUP_LDC));
5966 break;
5967
5968 case OP_SH:
5969 po_misc_or_fail (parse_shifter_operand (&str, i));
5970 break;
5971
5972 case OP_SHG:
5973 po_misc_or_fail_no_backtrack (
5974 parse_shifter_operand_group_reloc (&str, i));
5975 break;
5976
5977 case OP_oSHll:
5978 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5979 break;
5980
5981 case OP_oSHar:
5982 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5983 break;
5984
5985 case OP_oSHllar:
5986 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5987 break;
5988
5989 default:
5990 as_fatal ("unhandled operand code %d", upat[i]);
5991 }
5992
5993 /* Various value-based sanity checks and shared operations. We
5994 do not signal immediate failures for the register constraints;
5995 this allows a syntax error to take precedence. */
5996 switch (upat[i])
5997 {
5998 case OP_oRRnpc:
5999 case OP_RRnpc:
6000 case OP_RRnpcb:
6001 case OP_RRw:
6002 case OP_RRnpc_I0:
6003 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6004 inst.error = BAD_PC;
6005 break;
6006
6007 case OP_CPSF:
6008 case OP_ENDI:
6009 case OP_oROR:
6010 case OP_PSR:
6011 case OP_RVC_PSR:
6012 case OP_COND:
6013 case OP_oBARRIER:
6014 case OP_REGLST:
6015 case OP_VRSLST:
6016 case OP_VRDLST:
6017 case OP_VRSDLST:
6018 case OP_NRDLST:
6019 case OP_NSTRLST:
6020 if (val == FAIL)
6021 goto failure;
6022 inst.operands[i].imm = val;
6023 break;
6024
6025 default:
6026 break;
6027 }
6028
6029 /* If we get here, this operand was successfully parsed. */
6030 inst.operands[i].present = 1;
6031 continue;
6032
6033 bad_args:
6034 inst.error = BAD_ARGS;
6035
6036 failure:
6037 if (!backtrack_pos)
6038 {
	    /* The parse routine should already have set inst.error, but set a
	       default here just in case. */
6041 if (!inst.error)
6042 inst.error = _("syntax error");
6043 return FAIL;
6044 }
6045
6046 /* Do not backtrack over a trailing optional argument that
6047 absorbed some text. We will only fail again, with the
6048 'garbage following instruction' error message, which is
6049 probably less helpful than the current one. */
6050 if (backtrack_index == i && backtrack_pos != str
6051 && upat[i+1] == OP_stop)
6052 {
6053 if (!inst.error)
6054 inst.error = _("syntax error");
6055 return FAIL;
6056 }
6057
6058 /* Try again, skipping the optional argument at backtrack_pos. */
6059 str = backtrack_pos;
6060 inst.error = backtrack_error;
6061 inst.operands[backtrack_index].present = 0;
6062 i = backtrack_index;
6063 backtrack_pos = 0;
6064 }
6065
6066 /* Check that we have parsed all the arguments. */
6067 if (*str != '\0' && !inst.error)
6068 inst.error = _("garbage following instruction");
6069
6070 return inst.error ? FAIL : SUCCESS;
6071 }
6072
6073 #undef po_char_or_fail
6074 #undef po_reg_or_fail
6075 #undef po_reg_or_goto
6076 #undef po_imm_or_fail
6077 #undef po_scalar_or_fail
6078 \f
6079 /* Shorthand macro for instruction encoding functions issuing errors. */
6080 #define constraint(expr, err) do { \
6081 if (expr) \
6082 { \
6083 inst.error = err; \
6084 return; \
6085 } \
6086 } while (0)
6087
6088 /* Functions for operand encoding. ARM, then Thumb. */
6089
6090 #define rotate_left(v, n) (v << n | v >> (32 - n))
6091
6092 /* If VAL can be encoded in the immediate field of an ARM instruction,
6093 return the encoded form. Otherwise, return FAIL. */
6094
6095 static unsigned int
6096 encode_arm_immediate (unsigned int val)
6097 {
6098 unsigned int a, i;
6099
6100 for (i = 0; i < 32; i += 2)
6101 if ((a = rotate_left (val, i)) <= 0xff)
6102 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6103
6104 return FAIL;
6105 }
6106
6107 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6108 return the encoded form. Otherwise, return FAIL. */
6109 static unsigned int
6110 encode_thumb32_immediate (unsigned int val)
6111 {
6112 unsigned int a, i;
6113
6114 if (val <= 0xff)
6115 return val;
6116
6117 for (i = 1; i <= 24; i++)
6118 {
6119 a = val >> i;
6120 if ((val & ~(0xff << i)) == 0)
6121 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6122 }
6123
6124 a = val & 0xff;
6125 if (val == ((a << 16) | a))
6126 return 0x100 | a;
6127 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6128 return 0x300 | a;
6129
6130 a = val & 0xff00;
6131 if (val == ((a << 16) | a))
6132 return 0x200 | (a >> 8);
6133
6134 return FAIL;
6135 }
6136 /* Encode a VFP SP or DP register number into inst.instruction. */
6137
static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers above 15 require the VFPv3 extension: either record
     that the extension was used, or diagnose its absence.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Single-precision registers split the low bit off from the upper
     four; double-precision registers split the high bit off.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
6190
6191 /* Encode a <shift> in an ARM-format instruction. The immediate,
6192 if any, is handled by md_apply_fix. */
6193 static void
6194 encode_arm_shift (int i)
6195 {
6196 if (inst.operands[i].shift_kind == SHIFT_RRX)
6197 inst.instruction |= SHIFT_ROR << 5;
6198 else
6199 {
6200 inst.instruction |= inst.operands[i].shift_kind << 5;
6201 if (inst.operands[i].immisreg)
6202 {
6203 inst.instruction |= SHIFT_BY_REG;
6204 inst.instruction |= inst.operands[i].imm << 8;
6205 }
6206 else
6207 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6208 }
6209 }
6210
6211 static void
6212 encode_arm_shifter_operand (int i)
6213 {
6214 if (inst.operands[i].isreg)
6215 {
6216 inst.instruction |= inst.operands[i].reg;
6217 encode_arm_shift (i);
6218 }
6219 else
6220 inst.instruction |= INST_IMMEDIATE;
6221 }
6222
6223 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* The base register Rn always goes in bits 16-19.  */
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      /* T-form (user-mode) loads/stores are post-indexed only.  */
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      /* For T-form instructions the T bit shares the WRITE_BACK
	 position in the encoding.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits 12-15) is the same as a base
     register that gets written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6261
6262 /* inst.operands[i] was set up by parse_address. Encode it into an
6263 ARM-format mode 2 load or store instruction. If is_t is true,
6264 reject forms that cannot be used with a T instruction (i.e. not
6265 post-indexed). */
6266 static void
6267 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6268 {
6269 encode_arm_addr_mode_common (i, is_t);
6270
6271 if (inst.operands[i].immisreg)
6272 {
6273 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6274 inst.instruction |= inst.operands[i].imm;
6275 if (!inst.operands[i].negative)
6276 inst.instruction |= INDEX_UP;
6277 if (inst.operands[i].shifted)
6278 {
6279 if (inst.operands[i].shift_kind == SHIFT_RRX)
6280 inst.instruction |= SHIFT_ROR << 5;
6281 else
6282 {
6283 inst.instruction |= inst.operands[i].shift_kind << 5;
6284 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6285 }
6286 }
6287 }
6288 else /* immediate offset in inst.reloc */
6289 {
6290 if (inst.reloc.type == BFD_RELOC_UNUSED)
6291 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6292 }
6293 }
6294
6295 /* inst.operands[i] was set up by parse_address. Encode it into an
6296 ARM-format mode 3 load or store instruction. Reject forms that
6297 cannot be used with such instructions. If is_t is true, reject
6298 forms that cannot be used with a T instruction (i.e. not
6299 post-indexed). */
6300 static void
6301 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6302 {
6303 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6304 {
6305 inst.error = _("instruction does not accept scaled register index");
6306 return;
6307 }
6308
6309 encode_arm_addr_mode_common (i, is_t);
6310
6311 if (inst.operands[i].immisreg)
6312 {
6313 inst.instruction |= inst.operands[i].imm;
6314 if (!inst.operands[i].negative)
6315 inst.instruction |= INDEX_UP;
6316 }
6317 else /* immediate offset in inst.reloc */
6318 {
6319 inst.instruction |= HWOFFSET_IMM;
6320 if (inst.reloc.type == BFD_RELOC_UNUSED)
6321 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6322 }
6323 }
6324
6325 /* inst.operands[i] was set up by parse_address. Encode it into an
6326 ARM-format instruction. Reject all forms which cannot be encoded
6327 into a coprocessor load/store instruction. If wb_ok is false,
6328 reject use of writeback; if unind_ok is false, reject use of
6329 unindexed addressing. If reloc_override is not 0, use it instead
6330 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6331 (in which case it is preserved). */
6332
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* The base register Rn goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* In unindexed form the immediate (option) field replaces the
	 offset; no relocation is needed.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the offset relocation: an explicit override wins; otherwise a
     group relocation already chosen by the parser (the ALU_PC_G0_NC ..
     LDC_SB_G2 range, or LDR_PC_G0) is preserved; failing that, fall
     back to the generic coprocessor-offset relocation.  */
  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6385
6386 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6387 Determine whether it can be performed with a move instruction; if
6388 it can, convert inst.instruction to that move instruction and
6389 return 1; if it can't, convert inst.instruction to a literal-pool
6390 load and return 0. If this is not a valid thing to do in the
6391 current context, set inst.error and return 1.
6392
6393 inst.operands[i] describes the destination register. */
6394
static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit for the current instruction set; Thumb-2
     (32-bit) opcodes keep it in a different position.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo only makes sense on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  /* No mov/mvn form exists: fall back to a pc-relative load from the
     literal pool, rewriting operand 1 as [pc, #offset].  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
6467
6468 /* Functions for instruction encoding, sorted by subarchitecture.
6469 First some generics; their names are taken from the conventional
6470 bit positions for register arguments in ARM format instructions. */
6471
/* Encoder for instructions that need no operand encoding beyond the
   opcode itself.  */
static void
do_noargs (void)
{
}
6476
/* Rd in bits 12-15.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
6482
6483 static void
6484 do_rd_rm (void)
6485 {
6486 inst.instruction |= inst.operands[0].reg << 12;
6487 inst.instruction |= inst.operands[1].reg;
6488 }
6489
6490 static void
6491 do_rd_rn (void)
6492 {
6493 inst.instruction |= inst.operands[0].reg << 12;
6494 inst.instruction |= inst.operands[1].reg << 16;
6495 }
6496
6497 static void
6498 do_rn_rd (void)
6499 {
6500 inst.instruction |= inst.operands[0].reg << 16;
6501 inst.instruction |= inst.operands[1].reg << 12;
6502 }
6503
6504 static void
6505 do_rd_rm_rn (void)
6506 {
6507 unsigned Rn = inst.operands[2].reg;
6508 /* Enforce restrictions on SWP instruction. */
6509 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6510 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6511 _("Rn must not overlap other operands"));
6512 inst.instruction |= inst.operands[0].reg << 12;
6513 inst.instruction |= inst.operands[1].reg;
6514 inst.instruction |= Rn << 16;
6515 }
6516
6517 static void
6518 do_rd_rn_rm (void)
6519 {
6520 inst.instruction |= inst.operands[0].reg << 12;
6521 inst.instruction |= inst.operands[1].reg << 16;
6522 inst.instruction |= inst.operands[2].reg;
6523 }
6524
6525 static void
6526 do_rm_rd_rn (void)
6527 {
6528 inst.instruction |= inst.operands[0].reg;
6529 inst.instruction |= inst.operands[1].reg << 12;
6530 inst.instruction |= inst.operands[2].reg << 16;
6531 }
6532
/* A single immediate operand, placed in the low bits.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
6538
/* Rd in bits 12-15; operand 1 is a coprocessor address, with both
   writeback and unindexed forms permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
6545
6546 /* ARM instructions, in alphabetical order by function name (except
6547 that wrapper functions appear immediately after the function they
6548 wrap). */
6549
6550 /* This is a pseudo-op of the form "adr rd, label" to be converted
6551 into a relative address of the form "add rd, pc, #label-.-8". */
6552
6553 static void
6554 do_adr (void)
6555 {
6556 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6557
6558 /* Frag hacking will turn this into a sub instruction if the offset turns
6559 out to be negative. */
6560 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6561 inst.reloc.pc_rel = 1;
6562 inst.reloc.exp.X_add_number -= 8;
6563 }
6564
6565 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6566 into a relative address of the form:
6567 add rd, pc, #low(label-.-8)"
6568 add rd, rd, #high(label-.-8)" */
6569
6570 static void
6571 do_adrl (void)
6572 {
6573 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6574
6575 /* Frag hacking will turn this into a sub instruction if the offset turns
6576 out to be negative. */
6577 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6578 inst.reloc.pc_rel = 1;
6579 inst.size = INSN_SIZE * 2;
6580 inst.reloc.exp.X_add_number -= 8;
6581 }
6582
6583 static void
6584 do_arit (void)
6585 {
6586 if (!inst.operands[1].present)
6587 inst.operands[1].reg = inst.operands[0].reg;
6588 inst.instruction |= inst.operands[0].reg << 12;
6589 inst.instruction |= inst.operands[1].reg << 16;
6590 encode_arm_shifter_operand (2);
6591 }
6592
6593 static void
6594 do_barrier (void)
6595 {
6596 if (inst.operands[0].present)
6597 {
6598 constraint ((inst.instruction & 0xf0) != 0x40
6599 && inst.operands[0].imm != 0xf,
6600 "bad barrier type");
6601 inst.instruction |= inst.operands[0].imm;
6602 }
6603 else
6604 inst.instruction |= 0xf;
6605 }
6606
6607 static void
6608 do_bfc (void)
6609 {
6610 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6611 constraint (msb > 32, _("bit-field extends past end of register"));
6612 /* The instruction encoding stores the LSB and MSB,
6613 not the LSB and width. */
6614 inst.instruction |= inst.operands[0].reg << 12;
6615 inst.instruction |= inst.operands[1].imm << 7;
6616 inst.instruction |= (msb - 1) << 16;
6617 }
6618
6619 static void
6620 do_bfi (void)
6621 {
6622 unsigned int msb;
6623
6624 /* #0 in second position is alternative syntax for bfc, which is
6625 the same instruction but with REG_PC in the Rm field. */
6626 if (!inst.operands[1].isreg)
6627 inst.operands[1].reg = REG_PC;
6628
6629 msb = inst.operands[2].imm + inst.operands[3].imm;
6630 constraint (msb > 32, _("bit-field extends past end of register"));
6631 /* The instruction encoding stores the LSB and MSB,
6632 not the LSB and width. */
6633 inst.instruction |= inst.operands[0].reg << 12;
6634 inst.instruction |= inst.operands[1].reg;
6635 inst.instruction |= inst.operands[2].imm << 7;
6636 inst.instruction |= (msb - 1) << 16;
6637 }
6638
6639 static void
6640 do_bfx (void)
6641 {
6642 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6643 _("bit-field extends past end of register"));
6644 inst.instruction |= inst.operands[0].reg << 12;
6645 inst.instruction |= inst.operands[1].reg;
6646 inst.instruction |= inst.operands[2].imm << 7;
6647 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6648 }
6649
6650 /* ARM V5 breakpoint instruction (argument parse)
6651 BKPT <16 bit unsigned immediate>
6652 Instruction is not conditional.
6653 The bit pattern given in insns[] has the COND_ALWAYS condition,
6654 and it is an error if the caller tried to override that. */
6655
6656 static void
6657 do_bkpt (void)
6658 {
6659 /* Top 12 of 16 bits to bits 19:8. */
6660 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6661
6662 /* Bottom 4 of 16 bits to bits 3:0. */
6663 inst.instruction |= inst.operands[0].imm & 0xf;
6664 }
6665
6666 static void
6667 encode_branch (int default_reloc)
6668 {
6669 if (inst.operands[0].hasreloc)
6670 {
6671 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6672 _("the only suffix valid here is '(plt)'"));
6673 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6674 }
6675 else
6676 {
6677 inst.reloc.type = default_reloc;
6678 }
6679 inst.reloc.pc_rel = 1;
6680 }
6681
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later distinguishes plain jumps from calls with a
     separate relocation type.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6692
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional bl gets the call relocation; a
	 conditional one is relocated as a plain jump.  NOTE(review):
	 presumably because the linker may rewrite a call as blx, which
	 has no conditional form — confirm against the linker.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
6708
6709 /* ARM V5 branch-link-exchange instruction (argument parse)
6710 BLX <target_addr> ie BLX(1)
6711 BLX{<condition>} <Rm> ie BLX(2)
6712 Unfortunately, there are two different opcodes for this mnemonic.
6713 So, the insns[].value is not used, and the code here zaps values
6714 into inst.instruction.
6715 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6716
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) has its own fixed encoding, unrelated to insns[].value.  */
      inst.instruction = 0xfa000000;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
	encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
6743
/* Branch and exchange: BX{cond} <Rm>.  Rm goes in bits 0-3.  */
static void
do_bx (void)
{
  /* Legal but pointless, so just chide the user.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
6752
6753
6754 /* ARM v5TEJ. Jump to Jazelle code. */
6755
static void
do_bxj (void)
{
  /* Legal but pointless, so just chide the user.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
6764
6765 /* Co-processor data operation:
6766 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6767 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6768 static void
6769 do_cdp (void)
6770 {
6771 inst.instruction |= inst.operands[0].reg << 8;
6772 inst.instruction |= inst.operands[1].imm << 20;
6773 inst.instruction |= inst.operands[2].reg << 12;
6774 inst.instruction |= inst.operands[3].reg << 16;
6775 inst.instruction |= inst.operands[4].reg;
6776 inst.instruction |= inst.operands[5].imm << 5;
6777 }
6778
/* Comparison instructions: Rn in bits 16-19 plus a shifter operand.
   There is no Rd.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
6785
6786 /* Transfer between coprocessor and ARM registers.
6787 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6788 MRC2
6789 MCR{cond}
6790 MCR2
6791
6792 No special properties. */
6793
6794 static void
6795 do_co_reg (void)
6796 {
6797 inst.instruction |= inst.operands[0].reg << 8;
6798 inst.instruction |= inst.operands[1].imm << 21;
6799 inst.instruction |= inst.operands[2].reg << 12;
6800 inst.instruction |= inst.operands[3].reg << 16;
6801 inst.instruction |= inst.operands[4].reg;
6802 inst.instruction |= inst.operands[5].imm << 5;
6803 }
6804
6805 /* Transfer between coprocessor register and pair of ARM registers.
6806 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6807 MCRR2
6808 MRRC{cond}
6809 MRRC2
6810
6811 Two XScale instructions are special cases of these:
6812
6813 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6814 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6815
   Result unpredictable if Rd or Rn is R15. */
6817
6818 static void
6819 do_co_reg2c (void)
6820 {
6821 inst.instruction |= inst.operands[0].reg << 8;
6822 inst.instruction |= inst.operands[1].imm << 4;
6823 inst.instruction |= inst.operands[2].reg << 12;
6824 inst.instruction |= inst.operands[3].reg << 16;
6825 inst.instruction |= inst.operands[4].reg;
6826 }
6827
6828 static void
6829 do_cpsi (void)
6830 {
6831 inst.instruction |= inst.operands[0].imm << 6;
6832 if (inst.operands[1].present)
6833 {
6834 inst.instruction |= CPSI_MMOD;
6835 inst.instruction |= inst.operands[1].imm;
6836 }
6837 }
6838
/* DBG hint: the option immediate goes in the low bits.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
6844
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it but do not generate code for it.  */
  inst.size = 0;
}
6852
/* Load/store multiple: base register in bits 16-19, register mask in
   the low 16 bits.  A trailing '^' selects the type 2/3 form; warn
   about writeback combinations the architecture leaves UNPREDICTABLE.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback. */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2. */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types. */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM. */
	{
	  /* Not allowed for type 2. */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list. */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
6891
6892 /* ARMv5TE load-consecutive (argument parse)
6893 Mode is like LDRH.
6894
6895 LDRccD R, mode
6896 STRccD R, mode. */
6897
static void
do_ldrd (void)
{
  /* LDRD/STRD transfer an even/odd register pair; the second register
     may be omitted and defaults to first + 1.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* r14/r15 as the pair would include pc, which is not allowed.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
6934
/* LDREX Rd, [Rn]: only a plain pre-indexed base with zero offset is a
   valid addressing form.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No offset field, so no relocation either.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
6964
/* ARM V6K ldrexd: first destination must be an even register other than
   r14; the optional second destination must be the next register up.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* First destination in bits 12-15; operand 2 is the base register.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
6980
6981 static void
6982 do_ldst (void)
6983 {
6984 inst.instruction |= inst.operands[0].reg << 12;
6985 if (!inst.operands[1].isreg)
6986 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6987 return;
6988 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6989 }
6990
/* ldrt/strt family: encode Rd and force the address into the
   post-indexed form these instructions require.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only acceptable when it is really just
	 [Rn] with a zero offset; rewrite it as [Rn]! below.  */
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
7009
7010 /* Halfword and signed-byte load/store operations. */
7011
7012 static void
7013 do_ldstv4 (void)
7014 {
7015 inst.instruction |= inst.operands[0].reg << 12;
7016 if (!inst.operands[1].isreg)
7017 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7018 return;
7019 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7020 }
7021
/* ldrsht/ldrsbt-style translated accesses: mode-3 analogue of
   do_ldstt above — coerce [Rn] into [Rn]! and reject real offsets.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero-offset pre-indexed form can be rewritten.  */
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
7040
7041 /* Co-processor register load/store.
7042 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7043 static void
7044 do_lstc (void)
7045 {
7046 inst.instruction |= inst.operands[0].reg << 8;
7047 inst.instruction |= inst.operands[1].reg << 12;
7048 encode_arm_cp_address (2, TRUE, TRUE, 0);
7049 }
7050
7051 static void
7052 do_mlas (void)
7053 {
7054 /* This restriction does not apply to mls (nor to mla in v6, but
7055 that's hard to detect at present). */
7056 if (inst.operands[0].reg == inst.operands[1].reg
7057 && !(inst.instruction & 0x00400000))
7058 as_tsktsk (_("rd and rm should be different in mla"));
7059
7060 inst.instruction |= inst.operands[0].reg << 16;
7061 inst.instruction |= inst.operands[1].reg;
7062 inst.instruction |= inst.operands[2].reg << 8;
7063 inst.instruction |= inst.operands[3].reg << 12;
7064
7065 }
7066
7067 static void
7068 do_mov (void)
7069 {
7070 inst.instruction |= inst.operands[0].reg << 12;
7071 encode_arm_shifter_operand (1);
7072 }
7073
7074 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7075 static void
7076 do_mov16 (void)
7077 {
7078 bfd_vma imm;
7079 bfd_boolean top;
7080
7081 top = (inst.instruction & 0x00400000) != 0;
7082 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7083 _(":lower16: not allowed this instruction"));
7084 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7085 _(":upper16: not allowed instruction"));
7086 inst.instruction |= inst.operands[0].reg << 12;
7087 if (inst.reloc.type == BFD_RELOC_UNUSED)
7088 {
7089 imm = inst.reloc.exp.X_add_number;
7090 /* The value is in two pieces: 0:11, 16:19. */
7091 inst.instruction |= (imm & 0x00000fff);
7092 inst.instruction |= (imm & 0x0000f000) << 4;
7093 }
7094 }
7095
7096 static void do_vfp_nsyn_opcode (const char *);
7097
/* VFP-syntax forms of mrs.  A vector-style operand 0 selects fmstat
   (operand 1 must then be FPSCR, register number 1); a vector-style
   operand 1 selects fmrx.  Returns FAIL when neither applies so the
   caller can encode the plain core-register mrs instead.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
        first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; wipe them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
7116
7117 static int
7118 do_vfp_nsyn_msr (void)
7119 {
7120 if (inst.operands[0].isvec)
7121 do_vfp_nsyn_opcode ("fmxr");
7122 else
7123 return FAIL;
7124
7125 return SUCCESS;
7126 }
7127
/* MRS Rd, CPSR/SPSR.  Defers to the VFP pseudo-syntax handler first;
   otherwise requires a whole-PSR mask and encodes Rd plus the SPSR
   select bit.  */
static void
do_mrs (void)
{
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}
7141
7142 /* Two possible forms:
7143 "{C|S}PSR_<field>, Rm",
7144 "{C|S}PSR_f, #expression". */
7145
static void
do_msr (void)
{
  /* VFP pseudo-syntax (fmxr) takes precedence.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the PSR field mask (and SPSR bit) pre-shifted.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: value is fixed up later as a data-processing
	 immediate.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
7162
/* MUL Rd, Rm{, Rs}: when Rs is omitted it defaults to Rd.  */
static void
do_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Overlap of Rd and Rm only merits a gentle warning.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rd and rm should be different in mul"));
}
7175
7176 /* Long Multiply Parser
7177 UMULL RdLo, RdHi, Rm, Rs
7178 SMULL RdLo, RdHi, Rm, Rs
7179 UMLAL RdLo, RdHi, Rm, Rs
7180 SMLAL RdLo, RdHi, Rm, Rs. */
7181
static void
do_mull (void)
{
  /* RdLo in bits 12-15, RdHi in bits 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi, rdlo and rm must all be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      || inst.operands[0].reg == inst.operands[2].reg
      || inst.operands[1].reg == inst.operands[2].reg)
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
7196
7197 static void
7198 do_nop (void)
7199 {
7200 if (inst.operands[0].present)
7201 {
7202 /* Architectural NOP hints are CPSR sets with no bits selected. */
7203 inst.instruction &= 0xf0000000;
7204 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7205 }
7206 }
7207
7208 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7209 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7210 Condition defaults to COND_ALWAYS.
7211 Error if Rd, Rn or Rm are R15. */
7212
7213 static void
7214 do_pkhbt (void)
7215 {
7216 inst.instruction |= inst.operands[0].reg << 12;
7217 inst.instruction |= inst.operands[1].reg << 16;
7218 inst.instruction |= inst.operands[2].reg;
7219 if (inst.operands[3].present)
7220 encode_arm_shift (3);
7221 }
7222
7223 /* ARM V6 PKHTB (Argument Parse). */
7224
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      /* Clear the register and shift fields; note the Rn/Rm operands
	 are swapped relative to the shifted form below.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
7245
7246 /* ARMv5TE: Preload-Cache
7247
7248 PLD <addr_mode>
7249
7250 Syntactically, like LDR with B=1, W=0, L=1. */
7251
static void
do_pld (void)
{
  /* PLD only accepts a plain pre-indexed address; each rejected form
     gets its own diagnostic.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
7265
7266 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same address-form restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* The address was parsed as pre-indexed, but PLI is encoded with the
     P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
7281
/* PUSH/POP: encoded as ldm/stm with sp! as the base register.  The
   parsed register list becomes operand 1 and a synthetic writeback-sp
   operand is installed as operand 0 before dispatching to do_ldmstm.  */
static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}
7292
7293 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7294 word at the specified address and the following word
7295 respectively.
7296 Unconditionally executed.
7297 Error if Rn is R15. */
7298
7299 static void
7300 do_rfe (void)
7301 {
7302 inst.instruction |= inst.operands[0].reg << 16;
7303 if (inst.operands[0].writeback)
7304 inst.instruction |= WRITE_BACK;
7305 }
7306
7307 /* ARM V6 ssat (argument parse). */
7308
7309 static void
7310 do_ssat (void)
7311 {
7312 inst.instruction |= inst.operands[0].reg << 12;
7313 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7314 inst.instruction |= inst.operands[2].reg;
7315
7316 if (inst.operands[3].present)
7317 encode_arm_shift (3);
7318 }
7319
7320 /* ARM V6 usat (argument parse). */
7321
7322 static void
7323 do_usat (void)
7324 {
7325 inst.instruction |= inst.operands[0].reg << 12;
7326 inst.instruction |= inst.operands[1].imm << 16;
7327 inst.instruction |= inst.operands[2].reg;
7328
7329 if (inst.operands[3].present)
7330 encode_arm_shift (3);
7331 }
7332
7333 /* ARM V6 ssat16 (argument parse). */
7334
7335 static void
7336 do_ssat16 (void)
7337 {
7338 inst.instruction |= inst.operands[0].reg << 12;
7339 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7340 inst.instruction |= inst.operands[2].reg;
7341 }
7342
7343 static void
7344 do_usat16 (void)
7345 {
7346 inst.instruction |= inst.operands[0].reg << 12;
7347 inst.instruction |= inst.operands[1].imm << 16;
7348 inst.instruction |= inst.operands[2].reg;
7349 }
7350
7351 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7352 preserving the other bits.
7353
7354 setend <endian_specifier>, where <endian_specifier> is either
7355 BE or LE. */
7356
7357 static void
7358 do_setend (void)
7359 {
7360 if (inst.operands[0].imm)
7361 inst.instruction |= 0x200;
7362 }
7363
/* Shift pseudo-ops (lsl/lsr/asr/ror...): encoded as a mov with a
   shifter operand.  With only one source operand, Rm defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    /* Immediate shift amount: resolved later through a reloc.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
7381
7382 static void
7383 do_smc (void)
7384 {
7385 inst.reloc.type = BFD_RELOC_ARM_SMC;
7386 inst.reloc.pc_rel = 0;
7387 }
7388
7389 static void
7390 do_swi (void)
7391 {
7392 inst.reloc.type = BFD_RELOC_ARM_SWI;
7393 inst.reloc.pc_rel = 0;
7394 }
7395
7396 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7397 SMLAxy{cond} Rd,Rm,Rs,Rn
7398 SMLAWy{cond} Rd,Rm,Rs,Rn
7399 Error if any register is R15. */
7400
7401 static void
7402 do_smla (void)
7403 {
7404 inst.instruction |= inst.operands[0].reg << 16;
7405 inst.instruction |= inst.operands[1].reg;
7406 inst.instruction |= inst.operands[2].reg << 8;
7407 inst.instruction |= inst.operands[3].reg << 12;
7408 }
7409
7410 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7411 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7412 Error if any register is R15.
7413 Warning if Rdlo == Rdhi. */
7414
static void
do_smlal (void)
{
  /* RdLo in bits 12-15, RdHi in bits 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
7426
7427 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7428 SMULxy{cond} Rd,Rm,Rs
7429 Error if any register is R15. */
7430
7431 static void
7432 do_smul (void)
7433 {
7434 inst.instruction |= inst.operands[0].reg << 16;
7435 inst.instruction |= inst.operands[1].reg;
7436 inst.instruction |= inst.operands[2].reg << 8;
7437 }
7438
7439 /* ARM V6 srs (argument parse). */
7440
7441 static void
7442 do_srs (void)
7443 {
7444 inst.instruction |= inst.operands[0].imm;
7445 if (inst.operands[0].writeback)
7446 inst.instruction |= WRITE_BACK;
7447 }
7448
7449 /* ARM V6 strex (argument parse). */
7450
static void
do_strex (void)
{
  /* Operand 2 must be a bare [Rn] address, Rn != PC; see do_ldrex.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd may overlap neither the value register Rm
     nor the base register Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd in bits 12-15, Rm in bits 0-3, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
7474
/* ARM V6K strexd: value register pair must be even/odd consecutive and
   not include r14; the status register may not overlap the pair or the
   base register.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd in bits 12-15, first value register in 0-3, base in 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
7496
7497 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7498 extends it to 32-bits, and adds the result to a value in another
7499 register. You can specify a rotation by 0, 8, 16, or 24 bits
7500 before extracting the 16-bit value.
7501 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7502 Condition defaults to COND_ALWAYS.
7503 Error if any register uses R15. */
7504
7505 static void
7506 do_sxtah (void)
7507 {
7508 inst.instruction |= inst.operands[0].reg << 12;
7509 inst.instruction |= inst.operands[1].reg << 16;
7510 inst.instruction |= inst.operands[2].reg;
7511 inst.instruction |= inst.operands[3].imm << 10;
7512 }
7513
7514 /* ARM V6 SXTH.
7515
7516 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7517 Condition defaults to COND_ALWAYS.
7518 Error if any register uses R15. */
7519
7520 static void
7521 do_sxth (void)
7522 {
7523 inst.instruction |= inst.operands[0].reg << 12;
7524 inst.instruction |= inst.operands[1].reg;
7525 inst.instruction |= inst.operands[2].imm << 10;
7526 }
7527 \f
7528 /* VFP instructions. In a logical order: SP variant first, monad
7529 before dyad, arithmetic then move then load/store. */
7530
/* Single-precision monadic op: Sd <- op Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
7537
/* Single-precision dyadic op: Sd <- Sn op Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
7545
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
7551
/* Conversion with a double-precision destination and single-precision
   source: Dd <- cvt Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
7558
/* Conversion with a single-precision destination and double-precision
   source: Sd <- cvt Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
7565
/* Core register <- VFP single (fmrs-style): Rd in bits 12-15, Sn in
   the VFP Sn field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
7572
/* Two core registers <- a pair of consecutive VFP singles
   (fmrrs-style).  Operand 2's imm carries the register count parsed
   from the Sm pair.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
7582
/* VFP single <- core register (fmsr-style): Sn field plus Rd in
   bits 12-15.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
7589
/* A pair of consecutive VFP singles <- two core registers
   (fmsrr-style).  Operand 0's imm carries the parsed pair count.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7599
/* Single-precision load/store: Sd plus a coprocessor-style address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
7606
/* Double-precision load/store: Dd plus a coprocessor-style address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
7613
7614
/* Common encoder for single-precision load/store multiple.  Operand 0
   is the base register (optionally with writeback); operand 1 supplies
   the first Sd and, in imm, the register count.  Only the IA form may
   omit writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
7627
/* Common encoder for double-precision load/store multiple.  The word
   count is twice the register count; the X (FLDMX/FSTMX) variants add
   one extra word.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
7648
/* Single-precision ldm/stm, IA addressing.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
7654
/* Single-precision ldm/stm, DB addressing.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
7660
/* Double-precision ldm/stm, IA addressing.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
7666
/* Double-precision ldm/stm, DB addressing.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
7672
/* Extended (X-variant) ldm/stm, IA addressing.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
7678
/* Extended (X-variant) ldm/stm, DB addressing.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
7684
/* Double-precision two-operand op: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
7691
/* Double-precision two-operand op with Dn first: Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
7698
/* Double-precision two-operand op: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
7705
/* Double-precision three-operand op: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
7713
/* Double-precision single-operand op: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
7719
/* Double-precision three-operand op in Dm, Dd, Dn field order.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
7727
7728 /* VFPv3 instructions. */
/* VFPv3 single-precision immediate: the 8-bit encoded constant is
   split, high nibble into bits 16-19 and low nibble into bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
7736
/* VFPv3 double-precision immediate: same split as the SP form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
7744
/* VFPv3 fixed-point conversion: encode srcsize minus the requested
   fraction bits, with the low bit of that value in bit 5 and the rest
   in the low field.  */
static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
7752
/* 16-bit fixed-point conversion, single-precision register.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
7759
/* 16-bit fixed-point conversion, double-precision register.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
7766
/* 32-bit fixed-point conversion, single-precision register.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
7773
/* 32-bit fixed-point conversion, double-precision register.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
7780
7781 \f
7782 /* FPA instructions. Also in a logical order. */
7783
7784 static void
7785 do_fpa_cmp (void)
7786 {
7787 inst.instruction |= inst.operands[0].reg << 16;
7788 inst.instruction |= inst.operands[1].reg;
7789 }
7790
/* FPA lfm/sfm: encode the register count (1-4, where 4 is encoded as
   zero) and emulate the stacking forms by fabricating an offset.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register takes 12 bytes in the transfer.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending non-pre-indexed with writeback becomes a
	 post-indexed transfer.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7829
7830 \f
7831 /* iWMMXt instructions: strictly in alphabetical order. */
7832
/* tandc/torc/...: the only permitted destination is r15 (the encoding
   is fixed apart from the condition).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
7838
/* textrc: Rd in bits 12-15, lane immediate in the low bits.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
7845
/* textrm: Rd in bits 12-15, wRn in bits 16-19, lane in the low bits.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
7853
/* tinsr: wRd in bits 16-19, Rn in bits 12-15, lane in the low bits.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
7861
/* tmia: accumulator in bits 5-8, Rm in bits 0-3, Rs in bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
7869
/* waligni: wRd, wRn, wRm plus a 3-bit alignment immediate in
   bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
7878
/* wmerge: wRd, wRn, wRm plus the merge immediate in bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
7887
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;  /* Source doubles as wRm.  */
}
7896
/* wldrb/wldrh/wstrb/wstrh: byte/halfword forms use a scaled offset, so
   pick the matching scaled coprocessor-offset reloc for the current
   instruction set (Thumb-2 vs ARM).  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
7908
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers use the unconditional (0xf)
	 encoding, so no condition may be attached.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
7922
/* wldrd/wstrd.  On iWMMXt2 a register-offset address has its own
   encoding, built by hand here; everything else goes through the
   generic coprocessor address encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the opcode as the unconditional register-offset form.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
7945
/* wshufh: the 8-bit shuffle immediate is split, high nibble into
   bits 20-23 and low nibble into bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
7954
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
7963
/* iWMMXt shift/rotate ops that take either three registers or (on
   iWMMXt2) an immediate shift count.  A zero immediate is rewritten
   into an equivalent instruction, since zero is not encodable.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation size/kind; rewrite the
	   zero-count case per group.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7: 
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; count split across bits 8 and 0-3.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
8013 \f
8014 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8015 operations first, then control, shift, and load/store. */
8016
8017 /* Insns like "foo X,Y,Z". */
8018
8019 static void
8020 do_mav_triple (void)
8021 {
8022 inst.instruction |= inst.operands[0].reg << 16;
8023 inst.instruction |= inst.operands[1].reg;
8024 inst.instruction |= inst.operands[2].reg << 12;
8025 }
8026
8027 /* Insns like "foo W,X,Y,Z".
8028 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8029
8030 static void
8031 do_mav_quad (void)
8032 {
8033 inst.instruction |= inst.operands[0].reg << 5;
8034 inst.instruction |= inst.operands[1].reg << 12;
8035 inst.instruction |= inst.operands[2].reg << 16;
8036 inst.instruction |= inst.operands[3].reg;
8037 }
8038
8039 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8040 static void
8041 do_mav_dspsc (void)
8042 {
8043 inst.instruction |= inst.operands[1].reg << 12;
8044 }
8045
8046 /* Maverick shift immediate instructions.
8047 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8048 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8049
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
8065 \f
8066 /* XScale instructions. Also sorted arithmetic before move. */
8067
8068 /* Xscale multiply-accumulate (argument parse)
8069 MIAcc acc0,Rm,Rs
8070 MIAPHcc acc0,Rm,Rs
8071 MIAxycc acc0,Rm,Rs. */
8072
8073 static void
8074 do_xsc_mia (void)
8075 {
8076 inst.instruction |= inst.operands[1].reg;
8077 inst.instruction |= inst.operands[2].reg << 12;
8078 }
8079
8080 /* Xscale move-accumulator-register (argument parse)
8081
8082 MARcc acc0,RdLo,RdHi. */
8083
8084 static void
8085 do_xsc_mar (void)
8086 {
8087 inst.instruction |= inst.operands[1].reg << 12;
8088 inst.instruction |= inst.operands[2].reg << 16;
8089 }
8090
8091 /* Xscale move-register-accumulator (argument parse)
8092
8093 MRAcc RdLo,RdHi,acc0. */
8094
8095 static void
8096 do_xsc_mra (void)
8097 {
8098 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8099 inst.instruction |= inst.operands[0].reg << 12;
8100 inst.instruction |= inst.operands[1].reg << 16;
8101 }
8102 \f
8103 /* Encoding functions relevant only to Thumb. */
8104
8105 /* inst.operands[i] is a shifted-register operand; encode
8106 it into inst.instruction in the format used by Thumb32. */
8107
static void
encode_thumb32_shifted_operand (int i)
{
  /* Shift amount; only meaningful once X_op is known to be O_constant.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing operands cannot shift by a register.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* A count of 32 is representable only for LSR and ASR (where it
	 is encoded as 0); LSL and ROR top out at 31.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero count is canonicalized to LSL #0; LSR/ASR #32 are
	 encoded with a zero count.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type in bits 4-5; the amount is split between
	 bits 12-14 (imm3) and bits 6-7 (imm2).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8139
8140
8141 /* inst.operands[i] was set up by parse_address. Encode it into a
8142 Thumb32 format load or store instruction. Reject forms that cannot
8143 be used with such instructions. If is_t is true, reject forms that
8144 cannot be used with a T instruction; if is_d is true, reject forms
8145 that cannot be used with a D instruction. */
8146
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* Optional LSL amount (0-3) goes in bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No immediate field remains to relocate.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #imm] or [Rn, #imm]!.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  /* Doubleword form: set the P bit, and W for writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single form: preindex bits, plus writeback flag.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — parse guarantees writeback.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8217
8218 /* Table of Thumb instructions which exist in both 16- and 32-bit
8219 encodings (the latter only in post-V6T2 cores). The index is the
8220 value used in the insns table below. When there is more than one
8221 possible 16-bit encoding for the instruction, this table always
8222 holds variant (1).
8223 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004: SEV.W is F3AF 8004 (ARM ARM),
			       matching the nop/yield/wfe/wfi hint series.  */
8302
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* Enumerate the T_MNEM_* codes from the table above.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* The 16-bit opcode for each code, indexed by
   T_MNEM_* - (T16_32_OFFSET + 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* The corresponding 32-bit opcodes, indexed the same way.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Bit 20 distinguishes the flag-setting variants in the table
   (e.g. add eb000000 vs adds eb100000).  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
8321
8322 /* Thumb instruction encoders, in alphabetical order. */
8323
8324 /* ADDW or SUBW. */
8325 static void
8326 do_t_add_sub_w (void)
8327 {
8328 int Rd, Rn;
8329
8330 Rd = inst.operands[0].reg;
8331 Rn = inst.operands[1].reg;
8332
8333 constraint (Rd == 15, _("PC not allowed as destination"));
8334 inst.instruction |= (Rn << 16) | (Rd << 8);
8335 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8336 }
8337
8338 /* Parse an add or subtract instruction. We get here with inst.instruction
8339 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8340
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* The 16-bit encodings set flags exactly when used outside an IT
	 block, so a narrow encoding is only available when that matches
	 the mnemonic requested (adds/subs vs. add/sub).  */
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Leave the door open for relaxation to 32 bits
		     unless a 16-bit encoding was explicitly requested.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  /* Convert the table's register-form opcode into the
		     corresponding modified-immediate form
		     (0xebxxxxxx -> 0xf1xxxxxx).  */
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  /* Try the 16-bit high-register ADD, which requires
		     the destination to equal one of the sources.  */
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit encodings exist, and the
	 "s" mnemonics are not accepted.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
8515
static void
do_t_adr (void)
{
  /* With no explicit size and a low destination register, emit the
     16-bit encoding now and let section relaxation widen it later.  */
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}
8545
8546 /* Arithmetic instructions for which there is just one 16-bit
8547 instruction encoding, and it allows only two low registers.
8548 For maximal compatibility with ARM syntax, we allow three register
8549 operands even when Thumb-32 instructions are not available, as long
8550 as the first two are identical. For instance, both "sbc r0,r1" and
8551 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Convert the table's register-form opcode into the
	     corresponding modified-immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  /* The 16-bit forms set flags exactly when used outside an IT
	     block, so they are usable only when that matches the
	     requested mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
8629
8630 /* Similarly, but for instructions where the arithmetic operation is
8631 commutative, so we can allow either of them to be different from
8632 the destination operand in a 16-bit instruction. For instance, all
8633 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8634 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Convert the table's register-form opcode into the
	     corresponding modified-immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  /* The 16-bit forms set flags exactly when used outside an IT
	     block, so they are usable only when that matches the
	     requested mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 coincide with the destination for the two-operand
		 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
8725
8726 static void
8727 do_t_barrier (void)
8728 {
8729 if (inst.operands[0].present)
8730 {
8731 constraint ((inst.instruction & 0xf0) != 0x40
8732 && inst.operands[0].imm != 0xf,
8733 "bad barrier type");
8734 inst.instruction |= inst.operands[0].imm;
8735 }
8736 else
8737 inst.instruction |= 0xf;
8738 }
8739
8740 static void
8741 do_t_bfc (void)
8742 {
8743 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8744 constraint (msb > 32, _("bit-field extends past end of register"));
8745 /* The instruction encoding stores the LSB and MSB,
8746 not the LSB and width. */
8747 inst.instruction |= inst.operands[0].reg << 8;
8748 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8749 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8750 inst.instruction |= msb - 1;
8751 }
8752
8753 static void
8754 do_t_bfi (void)
8755 {
8756 unsigned int msb;
8757
8758 /* #0 in second position is alternative syntax for bfc, which is
8759 the same instruction but with REG_PC in the Rm field. */
8760 if (!inst.operands[1].isreg)
8761 inst.operands[1].reg = REG_PC;
8762
8763 msb = inst.operands[2].imm + inst.operands[3].imm;
8764 constraint (msb > 32, _("bit-field extends past end of register"));
8765 /* The instruction encoding stores the LSB and MSB,
8766 not the LSB and width. */
8767 inst.instruction |= inst.operands[0].reg << 8;
8768 inst.instruction |= inst.operands[1].reg << 16;
8769 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8770 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8771 inst.instruction |= msb - 1;
8772 }
8773
8774 static void
8775 do_t_bfx (void)
8776 {
8777 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8778 _("bit-field extends past end of register"));
8779 inst.instruction |= inst.operands[0].reg << 8;
8780 inst.instruction |= inst.operands[1].reg << 16;
8781 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8782 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8783 inst.instruction |= inst.operands[3].imm - 1;
8784 }
8785
8786 /* ARM V5 Thumb BLX (argument parse)
8787 BLX <target_addr> which is BLX(1)
8788 BLX <Rm> which is BLX(2)
8789 Unfortunately, there are two different opcodes for this mnemonic.
8790 So, the insns[].value is not used, and the code here zaps values
8791 into inst.instruction.
8792
8793 ??? How to take advantage of the additional two bits of displacement
8794 available in Thumb32 mode? Need new relocation? */
8795
static void
do_t_blx (void)
{
  /* Must be outside an IT block, or be its last instruction.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      /* For EABI v4 and later a plain BRANCH23 reloc is emitted instead
	 of a BLX reloc; presumably the linker handles the BL/BLX
	 distinction there — TODO confirm against reloc processing.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}
8816
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  /* Select the conditional-branch pseudo-mnemonic when needed.  */
  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      /* 32-bit branch encoding.  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* Condition 0xF cannot be encoded in a conditional branch.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* 16-bit branch encoding.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
8868
8869 static void
8870 do_t_bkpt (void)
8871 {
8872 constraint (inst.cond != COND_ALWAYS,
8873 _("instruction is always unconditional"));
8874 if (inst.operands[0].present)
8875 {
8876 constraint (inst.operands[0].imm > 255,
8877 _("immediate value out of range"));
8878 inst.instruction |= inst.operands[0].imm;
8879 }
8880 }
8881
static void
do_t_branch23 (void)
{
  /* Must be outside an IT block, or be its last instruction.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
8900
8901 static void
8902 do_t_bx (void)
8903 {
8904 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8905 inst.instruction |= inst.operands[0].reg << 3;
8906 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8907 should cause the alignment to be checked once it is known. This is
8908 because BX PC only works if the instruction is word aligned. */
8909 }
8910
8911 static void
8912 do_t_bxj (void)
8913 {
8914 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8915 if (inst.operands[0].reg == REG_PC)
8916 as_tsktsk (_("use of r15 in bxj is not really useful"));
8917
8918 inst.instruction |= inst.operands[0].reg << 16;
8919 }
8920
8921 static void
8922 do_t_clz (void)
8923 {
8924 inst.instruction |= inst.operands[0].reg << 8;
8925 inst.instruction |= inst.operands[1].reg << 16;
8926 inst.instruction |= inst.operands[1].reg;
8927 }
8928
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  The parsed operand
     value is OR'd straight into the low opcode bits.  */
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}
8935
static void
do_t_cpsi (void)
{
  /* CPSIE/CPSID may not appear inside an IT block.  */
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Build the 32-bit encoding: carry the imod field over from the
	 16-bit opcode template (bits 4-5), then the flag bits at bit 5
	 and, when a mode is given, the M bit plus the mode value.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* 16-bit encoding.  The 'A' flag (bit 2 of the operand) is only
	 valid on processors with the relevant extension.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
8963
8964 /* THUMB CPY instruction (argument parse). */
8965
8966 static void
8967 do_t_cpy (void)
8968 {
8969 if (inst.size_req == 4)
8970 {
8971 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8972 inst.instruction |= inst.operands[0].reg << 8;
8973 inst.instruction |= inst.operands[1].reg;
8974 }
8975 else
8976 {
8977 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8978 inst.instruction |= (inst.operands[0].reg & 0x7);
8979 inst.instruction |= inst.operands[1].reg << 3;
8980 }
8981 }
8982
8983 static void
8984 do_t_cbz (void)
8985 {
8986 constraint (current_it_mask, BAD_NOT_IT);
8987 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8988 inst.instruction |= inst.operands[0].reg;
8989 inst.reloc.pc_rel = 1;
8990 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8991 }
8992
/* DBG: the parsed option value is placed in the low opcode bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8998
8999 static void
9000 do_t_div (void)
9001 {
9002 if (!inst.operands[1].present)
9003 inst.operands[1].reg = inst.operands[0].reg;
9004 inst.instruction |= inst.operands[0].reg << 8;
9005 inst.instruction |= inst.operands[1].reg << 16;
9006 inst.instruction |= inst.operands[2].reg;
9007 }
9008
9009 static void
9010 do_t_hint (void)
9011 {
9012 if (unified_syntax && inst.size_req == 4)
9013 inst.instruction = THUMB_OP32 (inst.instruction);
9014 else
9015 inst.instruction = THUMB_OP16 (inst.instruction);
9016 }
9017
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  /* IT blocks may not nest.  */
  constraint (current_it_mask, BAD_NOT_IT);
  /* Record the mask (low 4 bits plus a guard bit) so following
     instructions can be validated against the block.  */
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* Flip every mask bit above the lowest set bit; the lowest set
	 bit terminates the mask and is left unchanged.  */
      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* Condition goes in bits 4-7.  */
  inst.instruction |= cond << 4;
}
9047
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction.  The 16-bit STMIA always
	 writes back; the 16-bit LDMIA writes back exactly when the base
	 register is not also loaded.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && inst.operands[0].reg <= 7
	  && !(inst.operands[1].imm & ~0xff)
	  && (inst.instruction == T_MNEM_stmia
	      ? inst.operands[0].writeback
	      : (inst.operands[0].writeback
		 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
	{
	  /* Storing the (written-back) base is only predictable when it
	     is the lowest register in the list.  */
	  if (inst.instruction == T_MNEM_stmia
	      && (inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);

	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].imm;
	}
      else
	{
	  /* 32-bit encoding: diagnose questionable register lists.  */
	  if (inst.operands[1].imm & (1 << 13))
	    as_warn (_("SP should not be in register list"));
	  if (inst.instruction == T_MNEM_stmia)
	    {
	      if (inst.operands[1].imm & (1 << 15))
		as_warn (_("PC should not be in register list"));
	      if (inst.operands[1].imm & (1 << inst.operands[0].reg))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);
	    }
	  else
	    {
	      if (inst.operands[1].imm & (1 << 14)
		  && inst.operands[1].imm & (1 << 15))
		as_warn (_("LR and PC should not both be in register list"));
	      if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
		  && inst.operands[0].writeback)
		as_warn (_("base register should not be in register list "
			   "when written back"));
	    }
	  /* ldmdb/stmdb mnemonics already carry a 32-bit opcode.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 16;
	  inst.instruction |= inst.operands[1].imm;
	  if (inst.operands[0].writeback)
	    inst.instruction |= WRITE_BACK;
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit encoding exists.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
9137
static void
do_t_ldrex (void)
{
  /* LDREX (Thumb-2): only a plain [Rn] / [Rn, #imm] pre-indexed form
     without writeback is legal.  Reject post-indexed, writeback,
     register-offset, shifted and negative addressing.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  /* The unsigned 8-bit offset is filled in by the fixup machinery.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
9151
9152 static void
9153 do_t_ldrexd (void)
9154 {
9155 if (!inst.operands[1].present)
9156 {
9157 constraint (inst.operands[0].reg == REG_LR,
9158 _("r14 not allowed as first register "
9159 "when second register is omitted"));
9160 inst.operands[1].reg = inst.operands[0].reg + 1;
9161 }
9162 constraint (inst.operands[0].reg == inst.operands[1].reg,
9163 BAD_OVERLAP);
9164
9165 inst.instruction |= inst.operands[0].reg << 12;
9166 inst.instruction |= inst.operands[1].reg << 8;
9167 inst.instruction |= inst.operands[2].reg << 16;
9168 }
9169
static void
do_t_ldst (void)
{
  /* Single-register Thumb load/store: picks between the many 16-bit
     encodings and the generic 32-bit encoding, and sets up relaxation
     when the size was not forced.  */
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand: may become a literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Ri] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* SP/PC-relative forms use dedicated 16-bit opcodes
		     with the transfer register in bits 8-10.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Size not forced: let relaxation widen to 32 bits if
		   the offset turns out not to fit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  /* Non-unified (pre-UAL) syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* SP/PC-relative word loads/stores have their own opcodes with
	 the transfer register in bits 8-10.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* 16-bit [Rn, Rm] form: swap the immediate-offset opcode for its
     register-offset counterpart.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
9324
9325 static void
9326 do_t_ldstd (void)
9327 {
9328 if (!inst.operands[1].present)
9329 {
9330 inst.operands[1].reg = inst.operands[0].reg + 1;
9331 constraint (inst.operands[0].reg == REG_LR,
9332 _("r14 not allowed here"));
9333 }
9334 inst.instruction |= inst.operands[0].reg << 12;
9335 inst.instruction |= inst.operands[1].reg << 8;
9336 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9337
9338 }
9339
9340 static void
9341 do_t_ldstt (void)
9342 {
9343 inst.instruction |= inst.operands[0].reg << 12;
9344 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9345 }
9346
9347 static void
9348 do_t_mla (void)
9349 {
9350 inst.instruction |= inst.operands[0].reg << 8;
9351 inst.instruction |= inst.operands[1].reg << 16;
9352 inst.instruction |= inst.operands[2].reg;
9353 inst.instruction |= inst.operands[3].reg << 12;
9354 }
9355
9356 static void
9357 do_t_mlal (void)
9358 {
9359 inst.instruction |= inst.operands[0].reg << 12;
9360 inst.instruction |= inst.operands[1].reg << 8;
9361 inst.instruction |= inst.operands[2].reg << 16;
9362 inst.instruction |= inst.operands[3].reg;
9363 }
9364
static void
do_t_mov_cmp (void)
{
  /* MOV{S} and CMP: choose between the several 16-bit encodings and
     the 32-bit one, honouring IT-block flag-setting rules.  */
  if (unified_syntax)
    {
      /* MOV/MOVS put Rd in bits 8-11; CMP puts Rn in bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the 16-bit forms do not set flags, so
	 16-bit movs is only usable outside one (with low regs).  */
      if (current_it_mask)
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (current_it_mask == 0 && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= inst.operands[0].reg << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* Size not forced: allow relaxation to widen later.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit modified-immediate form.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= inst.operands[0].reg << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << r0off;
	  encode_thumb32_shifted_operand (1);
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* 16-bit register-to-register MOV, any registers.  */
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
	    inst.instruction |= (inst.operands[0].reg & 0x7);
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= inst.operands[0].reg;
	    inst.instruction |= inst.operands[1].reg << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= inst.operands[0].reg;
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
		inst.instruction |= (inst.operands[0].reg & 0x7);
		inst.instruction |= inst.operands[1].reg << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);
  if (inst.operands[1].isreg)
    {
      if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
9483
static void
do_t_mov16 (void)
{
  /* MOVW/MOVT with a 16-bit immediate or a :lower16:/:upper16:
     relocation.  */
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT (top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  inst.instruction |= inst.operands[0].reg << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Plain constant: scatter the 16 bits into the imm4:i:imm3:imm8
	 fields now; otherwise the relocation fills them in later.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
9512
static void
do_t_mvn_tst (void)
{
  /* MVN{S}, TST, TEQ, CMN: pick 16- or 32-bit encoding.  */
  if (unified_syntax)
    {
      /* MVN/MVNS put Rd in bits 8-11; the compare-style ops put Rn in
	 bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= inst.operands[0].reg << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	  else
	    {
	      /* 32-bit form: only constant shifts of the second
		 operand are encodable.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: low registers, unshifted, 16-bit only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
9579
9580 static void
9581 do_t_mrs (void)
9582 {
9583 int flags;
9584
9585 if (do_vfp_nsyn_mrs () == SUCCESS)
9586 return;
9587
9588 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9589 if (flags == 0)
9590 {
9591 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9592 _("selected processor does not support "
9593 "requested special purpose register"));
9594 }
9595 else
9596 {
9597 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9598 _("selected processor does not support "
9599 "requested special purpose register %x"));
9600 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9601 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9602 _("'CPSR' or 'SPSR' expected"));
9603 }
9604
9605 inst.instruction |= inst.operands[0].reg << 8;
9606 inst.instruction |= (flags & SPSR_BIT) >> 2;
9607 inst.instruction |= inst.operands[1].imm & 0xff;
9608 }
9609
static void
do_t_msr (void)
{
  /* Thumb-2 MSR: write Rn to a special-purpose register.  */
  int flags;

  /* VFP pseudo-forms are handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* Bits above the low byte set: a CPSR/SPSR field mask.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* Otherwise an M-profile system register number; writes go to
	 the flags field by default.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
}
9639
static void
do_t_mul (void)
{
  /* MUL{S} Rd, Rn {, Rm}: a missing Rm defaults to Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL.  */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
		  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      /* The 16-bit form is destructive: Rd must equal one of the
	 source registers.  */
      if (inst.operands[0].reg == inst.operands[1].reg)
	inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
	inst.instruction |= inst.operands[1].reg << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
9672
9673 static void
9674 do_t_mull (void)
9675 {
9676 inst.instruction |= inst.operands[0].reg << 12;
9677 inst.instruction |= inst.operands[1].reg << 8;
9678 inst.instruction |= inst.operands[2].reg << 16;
9679 inst.instruction |= inst.operands[3].reg;
9680
9681 if (inst.operands[0].reg == inst.operands[1].reg)
9682 as_tsktsk (_("rdhi and rdlo must be different"));
9683 }
9684
9685 static void
9686 do_t_nop (void)
9687 {
9688 if (unified_syntax)
9689 {
9690 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9691 {
9692 inst.instruction = THUMB_OP32 (inst.instruction);
9693 inst.instruction |= inst.operands[0].imm;
9694 }
9695 else
9696 {
9697 inst.instruction = THUMB_OP16 (inst.instruction);
9698 inst.instruction |= inst.operands[0].imm << 4;
9699 }
9700 }
9701 else
9702 {
9703 constraint (inst.operands[0].present,
9704 _("Thumb does not support NOP with hints"));
9705 inst.instruction = 0x46c0;
9706 }
9707 }
9708
9709 static void
9710 do_t_neg (void)
9711 {
9712 if (unified_syntax)
9713 {
9714 bfd_boolean narrow;
9715
9716 if (THUMB_SETS_FLAGS (inst.instruction))
9717 narrow = (current_it_mask == 0);
9718 else
9719 narrow = (current_it_mask != 0);
9720 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9721 narrow = FALSE;
9722 if (inst.size_req == 4)
9723 narrow = FALSE;
9724
9725 if (!narrow)
9726 {
9727 inst.instruction = THUMB_OP32 (inst.instruction);
9728 inst.instruction |= inst.operands[0].reg << 8;
9729 inst.instruction |= inst.operands[1].reg << 16;
9730 }
9731 else
9732 {
9733 inst.instruction = THUMB_OP16 (inst.instruction);
9734 inst.instruction |= inst.operands[0].reg;
9735 inst.instruction |= inst.operands[1].reg << 3;
9736 }
9737 }
9738 else
9739 {
9740 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9741 BAD_HIREG);
9742 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9743
9744 inst.instruction = THUMB_OP16 (inst.instruction);
9745 inst.instruction |= inst.operands[0].reg;
9746 inst.instruction |= inst.operands[1].reg << 3;
9747 }
9748 }
9749
9750 static void
9751 do_t_pkhbt (void)
9752 {
9753 inst.instruction |= inst.operands[0].reg << 8;
9754 inst.instruction |= inst.operands[1].reg << 16;
9755 inst.instruction |= inst.operands[2].reg;
9756 if (inst.operands[3].present)
9757 {
9758 unsigned int val = inst.reloc.exp.X_add_number;
9759 constraint (inst.reloc.exp.X_op != O_constant,
9760 _("expression too complex"));
9761 inst.instruction |= (val & 0x1c) << 10;
9762 inst.instruction |= (val & 0x03) << 6;
9763 }
9764 }
9765
9766 static void
9767 do_t_pkhtb (void)
9768 {
9769 if (!inst.operands[3].present)
9770 inst.instruction &= ~0x00000020;
9771 do_t_pkhbt ();
9772 }
9773
static void
do_t_pld (void)
{
  /* PLD: no destination register, just the addressing mode.  */
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
9779
static void
do_t_push_pop (void)
{
  /* PUSH/POP: select between the two 16-bit encodings, a single
     register str/ldr, and the 32-bit LDM/STM form.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit encoding
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      mask &= 0xff;
    }
  else if (unified_syntax)
    {
      if (mask & (1 << 13))
	inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
	{
	  if (mask & (1 << 15))
	    inst.error = _("PC not allowed in register list");
	}
      else
	{
	  if (mask & (1 << 14)
	      && mask & (1 << 15))
	    inst.error = _("LR and PC should not both be in register list");
	}
      if ((mask & (mask - 1)) == 0)
	{
	  /* Single register push/pop implemented as str/ldr.
	     NOTE(review): assumes mask is non-zero here (operand
	     parsing rejects an empty list) — ffs(0)-1 would
	     misbehave.  */
	  if (inst.instruction == T_MNEM_push)
	    inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
	  else
	    inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
	  mask = ffs(mask) - 1;
	  mask <<= 12;
	}
      else
	inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }

  inst.instruction |= mask;
}
9838
9839 static void
9840 do_t_rbit (void)
9841 {
9842 inst.instruction |= inst.operands[0].reg << 8;
9843 inst.instruction |= inst.operands[1].reg << 16;
9844 }
9845
9846 static void
9847 do_t_rev (void)
9848 {
9849 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9850 && inst.size_req != 4)
9851 {
9852 inst.instruction = THUMB_OP16 (inst.instruction);
9853 inst.instruction |= inst.operands[0].reg;
9854 inst.instruction |= inst.operands[1].reg << 3;
9855 }
9856 else if (unified_syntax)
9857 {
9858 inst.instruction = THUMB_OP32 (inst.instruction);
9859 inst.instruction |= inst.operands[0].reg << 8;
9860 inst.instruction |= inst.operands[1].reg << 16;
9861 inst.instruction |= inst.operands[1].reg;
9862 }
9863 else
9864 inst.error = BAD_HIREG;
9865 }
9866
9867 static void
9868 do_t_rsb (void)
9869 {
9870 int Rd, Rs;
9871
9872 Rd = inst.operands[0].reg;
9873 Rs = (inst.operands[1].present
9874 ? inst.operands[1].reg /* Rd, Rs, foo */
9875 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9876
9877 inst.instruction |= Rd << 8;
9878 inst.instruction |= Rs << 16;
9879 if (!inst.operands[2].isreg)
9880 {
9881 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9882 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9883 }
9884 else
9885 encode_thumb32_shifted_operand (2);
9886 }
9887
9888 static void
9889 do_t_setend (void)
9890 {
9891 constraint (current_it_mask, BAD_NOT_IT);
9892 if (inst.operands[0].imm)
9893 inst.instruction |= 0x8;
9894 }
9895
static void
do_t_shift (void)
{
  /* ASR/LSL/LSR/ROR, register or immediate shift count.  A single
     operand means "shift Rd in place".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit forms set flags, so they are only usable outside
	 an IT block for the flag-setting mnemonics and inside one for
	 the plain mnemonics; then rule out operand combinations the
	 16-bit forms cannot express.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Immediate count: encode as MOV{S} Rd, Rm, <shift>.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified syntax: 16-bit, low registers only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  /* Register-count form is destructive: Rd must equal Rs.  */
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
10027
10028 static void
10029 do_t_simd (void)
10030 {
10031 inst.instruction |= inst.operands[0].reg << 8;
10032 inst.instruction |= inst.operands[1].reg << 16;
10033 inst.instruction |= inst.operands[2].reg;
10034 }
10035
10036 static void
10037 do_t_smc (void)
10038 {
10039 unsigned int value = inst.reloc.exp.X_add_number;
10040 constraint (inst.reloc.exp.X_op != O_constant,
10041 _("expression too complex"));
10042 inst.reloc.type = BFD_RELOC_UNUSED;
10043 inst.instruction |= (value & 0xf000) >> 12;
10044 inst.instruction |= (value & 0x0ff0);
10045 inst.instruction |= (value & 0x000f) << 16;
10046 }
10047
static void
do_t_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* Rd.  */
  inst.instruction |= inst.operands[1].imm - 1;		/* Bound, as sat-1.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */

  if (inst.operands[3].present)
    {
      /* Optional constant shift: amount split across imm3:imm2; the
	 sh bit selects ASR.  A zero amount (LSL #0) sets no bits.  */
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
10070
10071 static void
10072 do_t_ssat16 (void)
10073 {
10074 inst.instruction |= inst.operands[0].reg << 8;
10075 inst.instruction |= inst.operands[1].imm - 1;
10076 inst.instruction |= inst.operands[2].reg << 16;
10077 }
10078
static void
do_t_strex (void)
{
  /* STREX: only a plain pre-indexed immediate-offset address without
     writeback is legal (same rules as LDREX).  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  /* The unsigned 8-bit offset is filled in by the fixup machinery.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
10093
static void
do_t_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn]: an omitted Rt2 defaults to Rt+1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap the data registers or the
     base, and the two data registers must differ.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg
	      || inst.operands[1].reg == inst.operands[2].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;		/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rt2.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
10111
10112 static void
10113 do_t_sxtah (void)
10114 {
10115 inst.instruction |= inst.operands[0].reg << 8;
10116 inst.instruction |= inst.operands[1].reg << 16;
10117 inst.instruction |= inst.operands[2].reg;
10118 inst.instruction |= inst.operands[3].imm << 4;
10119 }
10120
static void
do_t_sxth (void)
{
  /* SXTH/SXTB/UXTH/UXTB: 16-bit form needs low registers and no
     rotation.  */
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit form, with the optional rotation in bits 4-5.  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
10147
static void
do_t_swi (void)
{
  /* SWI/SVC: the comment field is handled entirely by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
10153
static void
do_t_tb (void)
{
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* Only allowed as the last instruction of an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  /* Only TBH takes the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}
10169
static void
do_t_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;	/* Rd.  */
  inst.instruction |= inst.operands[1].imm;		/* Bound (direct).  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */

  if (inst.operands[3].present)
    {
      /* Optional constant shift, encoded as for SSAT.  */
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000; /* sh bit */

	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
10192
static void
do_t_usat16 (void)
{
  /* Thumb-2 USAT16: Rd in bits 8-11, saturation position in bits 0-3,
     Rn in bits 16-19.  The halfword form takes no shift operand.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;
}
10200
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the base encodings for the (up to) three
   variants of an overloaded mnemonic.  Which field means what is
   determined by the NEON_ENC_* accessor macros below.  */
struct neon_tab_entry
{
  unsigned integer;       /* Integer / ARM-register / interleave variant.  */
  unsigned float_or_poly; /* Float, polynomial or per-lane variant.  */
  unsigned scalar_or_imm; /* Scalar, immediate or all-lanes (dup) variant.  */
};
10214
/* Map overloaded Neon opcodes to their respective encodings.  The three
   encoding columns correspond to the fields of struct neon_tab_entry
   (integer, float_or_poly, scalar_or_imm); N_INV marks a variant that
   does not exist for that opcode.  */
#define NEON_ENC_TAB \
  X(vabd, 0x0000700, 0x1200d00, N_INV), \
  X(vmax, 0x0000600, 0x0000f00, N_INV), \
  X(vmin, 0x0000610, 0x0200f00, N_INV), \
  X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
  X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
  X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
  X(vadd, 0x0000800, 0x0000d00, N_INV), \
  X(vsub, 0x1000800, 0x0200d00, N_INV), \
  X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed. */ \
  X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
  X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
  X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
  X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
  X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
  X(vmlal, 0x0800800, N_INV, 0x0800240), \
  X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
  X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
  X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
  X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
  X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
  X(vshl, 0x0000400, N_INV, 0x0800510), \
  X(vqshl, 0x0000410, N_INV, 0x0800710), \
  X(vand, 0x0000110, N_INV, 0x0800030), \
  X(vbic, 0x0100110, N_INV, 0x0800030), \
  X(veor, 0x1000110, N_INV, N_INV), \
  X(vorn, 0x0300110, N_INV, 0x0800010), \
  X(vorr, 0x0200110, N_INV, 0x0800010), \
  X(vmvn, 0x1b00580, N_INV, 0x0800030), \
  X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
  X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
  X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
  X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
  X(vst1, 0x0000000, 0x0800000, N_INV), \
  X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2, 0x0000100, 0x0800100, N_INV), \
  X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3, 0x0000200, 0x0800200, N_INV), \
  X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4, 0x0000300, 0x0800300, N_INV), \
  X(vmovn, 0x1b20200, N_INV, N_INV), \
  X(vtrn, 0x1b20080, N_INV, N_INV), \
  X(vqmovn, 0x1b20200, N_INV, N_INV), \
  X(vqmovun, 0x1b20240, N_INV, N_INV), \
  X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
  X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
  X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
  X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10273
/* N_MNEM_<op> enumerates the overloaded Neon/VFP mnemonics; these values
   index neon_enc_tab (via the NEON_ENC_* macros below).  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
10280
/* Encoding table proper: one entry per overloaded mnemonic, in the same
   order as enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
10287
/* Fetch the encoding of a variant of overloaded opcode X.  The low 28 bits
   of X hold the N_MNEM_* index; several logical variants share one field of
   the table entry.  NEON_ENC_SINGLE/DOUBLE additionally preserve the top
   nibble of X (the condition bits) in the result.  */
#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10301
10302 /* Define shapes for instruction operands. The following mnemonic characters
10303 are used in this table:
10304
10305 F - VFP S<n> register
10306 D - Neon D<n> register
10307 Q - Neon Q<n> register
10308 I - Immediate
10309 S - Scalar
10310 R - ARM register
10311 L - D<n> register list
10312
10313 This table is used to generate various data:
10314 - enumerations of the form NS_DDR to be used as arguments to
10315 neon_select_shape.
10316 - a table classifying shapes into single, double, quad, mixed.
10317 - a table used to drive neon_select_shape.
10318 */
10319
/* Each X() entry gives the operand count, the kind of each operand (letters
   as documented above), and the overall classification of the shape.  */
#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE)
10361
/* Build the NS_* shape enumeration from the table: S<n> pastes the operand
   letters together, so e.g. X(3, (D, D, D), DOUBLE) becomes NS_DDD.  */
#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL	/* Sentinel: "no shape" / end of a shape list.  */
};

#undef X
#undef S2
#undef S3
#undef S4
10378
/* Classification of a shape: which register-width family it belongs to.  */
enum neon_shape_class
{
  SC_SINGLE,	/* VFP single-precision operands.  */
  SC_DOUBLE,	/* Neon D-register operands.  */
  SC_QUAD,	/* Neon Q-register operands.  */
  SC_MIXED	/* A mixture of the above.  */
};
10386
/* Per-shape classification, indexed by enum neon_shape.  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
10395
/* The kinds of operand a shape element may be (one letter each in
   NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM register.  */
  SE_L		/* D<n> register list.  */
};
10406
/* Register widths of above, in bits (0 where width is not meaningful),
   indexed by enum neon_shape_el.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};
10418
/* A decoded shape: how many operands it has and the kind of each.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};
10424
/* Expand each shape into its operand count and element kinds; indexed by
   enum neon_shape (same order as the enumeration above).  */
#define S2(A,B) { SE_##A, SE_##B }
#define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
10440
10441 /* Bit masks used in type checking given instructions.
10442 'N_EQK' means the type must be the same as (or based on in some way) the key
10443 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10444 set, various other bits can be set as well in order to modify the meaning of
10445 the type constraint. */
10446
enum neon_type_mask
{
  N_S8   = 0x000001,
  N_S16  = 0x000002,
  N_S32  = 0x000004,
  N_S64  = 0x000008,
  N_U8   = 0x000010,
  N_U16  = 0x000020,
  N_U32  = 0x000040,
  N_U64  = 0x000080,
  N_I8   = 0x000100,
  N_I16  = 0x000200,
  N_I32  = 0x000400,
  N_I64  = 0x000800,
  N_8    = 0x001000,
  N_16   = 0x002000,
  N_32   = 0x004000,
  N_64   = 0x008000,
  N_P8   = 0x010000,
  N_P16  = 0x020000,
  N_F32  = 0x040000,
  N_F64  = 0x080000,
  N_KEY  = 0x100000, /* key element (main type specifier). */
  N_EQK  = 0x200000, /* given operand has the same type & size as the key. */
  N_VFP  = 0x400000, /* VFP mode: operand size must match register width. */
  /* The modifier bits below deliberately reuse the values of the type bits
     above; they are only interpreted when N_EQK is also set.  */
  N_DBL  = 0x000001, /* if N_EQK, this operand is twice the size. */
  N_HLF  = 0x000002, /* if N_EQK, this operand is half the size. */
  N_SGN  = 0x000004, /* if N_EQK, this operand is forced to be signed. */
  N_UNS  = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
  N_INT  = 0x000010, /* if N_EQK, this operand is forced to be integer. */
  N_FLT  = 0x000020, /* if N_EQK, this operand is forced to be float. */
  N_SIZ  = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
  N_UTYP = 0,	     /* "no type": conversion failure marker.  */
  N_MAX_NONSPECIAL = N_F64
};
10482
/* Commonly-used combinations of the type bits above.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
10495
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; stop at the first one whose element
     kinds all match the parsed operands.  */
  for (; shape != NS_NULL; shape = va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A missing operand can never match.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check operand J against the kind the shape expects there.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked here.  */
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
10589
10590 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10591 means the Q bit should be set). */
10592
10593 static int
10594 neon_quad (enum neon_shape shape)
10595 {
10596 return neon_shape_class[shape] == SC_QUAD;
10597 }
10598
10599 static void
10600 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10601 unsigned *g_size)
10602 {
10603 /* Allow modification to be made to types which are constrained to be
10604 based on the key element, based on bits set alongside N_EQK. */
10605 if ((typebits & N_EQK) != 0)
10606 {
10607 if ((typebits & N_HLF) != 0)
10608 *g_size /= 2;
10609 else if ((typebits & N_DBL) != 0)
10610 *g_size *= 2;
10611 if ((typebits & N_SGN) != 0)
10612 *g_type = NT_signed;
10613 else if ((typebits & N_UNS) != 0)
10614 *g_type = NT_unsigned;
10615 else if ((typebits & N_INT) != 0)
10616 *g_type = NT_integer;
10617 else if ((typebits & N_FLT) != 0)
10618 *g_type = NT_float;
10619 else if ((typebits & N_SIZ) != 0)
10620 *g_type = NT_untyped;
10621 }
10622 }
10623
10624 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
10625 operand type, i.e. the single type specified in a Neon instruction when it
10626 is the only one given. */
10627
10628 static struct neon_type_el
10629 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10630 {
10631 struct neon_type_el dest = *key;
10632
10633 assert ((thisarg & N_EQK) != 0);
10634
10635 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10636
10637 return dest;
10638 }
10639
10640 /* Convert Neon type and size into compact bitmask representation. */
10641
10642 static enum neon_type_mask
10643 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10644 {
10645 switch (type)
10646 {
10647 case NT_untyped:
10648 switch (size)
10649 {
10650 case 8: return N_8;
10651 case 16: return N_16;
10652 case 32: return N_32;
10653 case 64: return N_64;
10654 default: ;
10655 }
10656 break;
10657
10658 case NT_integer:
10659 switch (size)
10660 {
10661 case 8: return N_I8;
10662 case 16: return N_I16;
10663 case 32: return N_I32;
10664 case 64: return N_I64;
10665 default: ;
10666 }
10667 break;
10668
10669 case NT_float:
10670 switch (size)
10671 {
10672 case 32: return N_F32;
10673 case 64: return N_F64;
10674 default: ;
10675 }
10676 break;
10677
10678 case NT_poly:
10679 switch (size)
10680 {
10681 case 8: return N_P8;
10682 case 16: return N_P16;
10683 default: ;
10684 }
10685 break;
10686
10687 case NT_signed:
10688 switch (size)
10689 {
10690 case 8: return N_S8;
10691 case 16: return N_S16;
10692 case 32: return N_S32;
10693 case 64: return N_S64;
10694 default: ;
10695 }
10696 break;
10697
10698 case NT_unsigned:
10699 switch (size)
10700 {
10701 case 8: return N_U8;
10702 case 16: return N_U16;
10703 case 32: return N_U32;
10704 case 64: return N_U64;
10705 default: ;
10706 }
10707 break;
10708
10709 default: ;
10710 }
10711
10712 return N_UTYP;
10713 }
10714
10715 /* Convert compact Neon bitmask type representation to a type and size. Only
10716 handles the case where a single bit is set in the mask. */
10717
10718 static int
10719 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10720 enum neon_type_mask mask)
10721 {
10722 if ((mask & N_EQK) != 0)
10723 return FAIL;
10724
10725 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10726 *size = 8;
10727 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10728 *size = 16;
10729 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10730 *size = 32;
10731 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10732 *size = 64;
10733 else
10734 return FAIL;
10735
10736 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10737 *type = NT_signed;
10738 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10739 *type = NT_unsigned;
10740 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10741 *type = NT_integer;
10742 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10743 *type = NT_untyped;
10744 else if ((mask & (N_P8 | N_P16)) != 0)
10745 *type = NT_poly;
10746 else if ((mask & (N_F32 | N_F64)) != 0)
10747 *type = NT_float;
10748 else
10749 return FAIL;
10750
10751 return SUCCESS;
10752 }
10753
10754 /* Modify a bitmask of allowed types. This is only needed for type
10755 relaxation. */
10756
10757 static unsigned
10758 modify_types_allowed (unsigned allowed, unsigned mods)
10759 {
10760 unsigned size;
10761 enum neon_el_type type;
10762 unsigned destmask;
10763 int i;
10764
10765 destmask = 0;
10766
10767 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10768 {
10769 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10770 {
10771 neon_modify_type_size (mods, &type, &size);
10772 destmask |= type_chk_of_el_type (type, size);
10773 }
10774 }
10775
10776 return destmask;
10777 }
10778
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* When an instruction's optional operand is omitted, it is operand 1
     that is missing.  Fill it in here by duplicating operand 0.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE short-circuits all checking.  */
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* Types on the mnemonic and on individual operands are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key element's type and allowed set; pass 1 checks
     every element against the key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
                  unsigned regwidth = neon_shape_el_size[regshape], match;

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand.  */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
10957
10958 /* Neon-style VFP instruction forwarding. */
10959
10960 /* Thumb VFP instructions have 0xE in the condition field. */
10961
10962 static void
10963 do_vfp_cond_or_thumb (void)
10964 {
10965 if (thumb_mode)
10966 inst.instruction |= 0xe0000000;
10967 else
10968 inst.instruction |= inst.cond << 28;
10969 }
10970
10971 /* Look up and encode a simple mnemonic, for use as a helper function for the
10972 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10973 etc. It is assumed that operand parsing has already been done, and that the
10974 operands are in the form expected by the given opcode (this isn't necessarily
10975 the same as the form in which they were parsed, hence some massaging must
10976 take place before this function is called).
10977 Checks current arch version against that in the looked-up opcode. */
10978
10979 static void
10980 do_vfp_nsyn_opcode (const char *opname)
10981 {
10982 const struct asm_opcode *opcode;
10983
10984 opcode = hash_find (arm_ops_hsh, opname);
10985
10986 if (!opcode)
10987 abort ();
10988
10989 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
10990 thumb_mode ? *opcode->tvariant : *opcode->avariant),
10991 _(BAD_FPU));
10992
10993 if (thumb_mode)
10994 {
10995 inst.instruction = opcode->tvalue;
10996 opcode->tencode ();
10997 }
10998 else
10999 {
11000 inst.instruction = (inst.cond << 28) | opcode->avalue;
11001 opcode->aencode ();
11002 }
11003 }
11004
11005 static void
11006 do_vfp_nsyn_add_sub (enum neon_shape rs)
11007 {
11008 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11009
11010 if (rs == NS_FFF)
11011 {
11012 if (is_add)
11013 do_vfp_nsyn_opcode ("fadds");
11014 else
11015 do_vfp_nsyn_opcode ("fsubs");
11016 }
11017 else
11018 {
11019 if (is_add)
11020 do_vfp_nsyn_opcode ("faddd");
11021 else
11022 do_vfp_nsyn_opcode ("fsubd");
11023 }
11024 }
11025
/* Check operand types to see if this is a VFP instruction, and if so call
   PFN ().  */

static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Only 2- and 3-operand forms are handled here.  */
  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs,
        N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs,
        N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      /* The operands fit a VFP shape: hand off to the VFP encoder.  */
      pfn (rs);
      return SUCCESS;
    }
  else
    /* Not VFP: clear the error so Neon handling can be tried instead.  */
    inst.error = NULL;

  return FAIL;
}
11063
11064 static void
11065 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11066 {
11067 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11068
11069 if (rs == NS_FFF)
11070 {
11071 if (is_mla)
11072 do_vfp_nsyn_opcode ("fmacs");
11073 else
11074 do_vfp_nsyn_opcode ("fmscs");
11075 }
11076 else
11077 {
11078 if (is_mla)
11079 do_vfp_nsyn_opcode ("fmacd");
11080 else
11081 do_vfp_nsyn_opcode ("fmscd");
11082 }
11083 }
11084
11085 static void
11086 do_vfp_nsyn_mul (enum neon_shape rs)
11087 {
11088 if (rs == NS_FFF)
11089 do_vfp_nsyn_opcode ("fmuls");
11090 else
11091 do_vfp_nsyn_opcode ("fmuld");
11092 }
11093
11094 static void
11095 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11096 {
11097 int is_neg = (inst.instruction & 0x80) != 0;
11098 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11099
11100 if (rs == NS_FF)
11101 {
11102 if (is_neg)
11103 do_vfp_nsyn_opcode ("fnegs");
11104 else
11105 do_vfp_nsyn_opcode ("fabss");
11106 }
11107 else
11108 {
11109 if (is_neg)
11110 do_vfp_nsyn_opcode ("fnegd");
11111 else
11112 do_vfp_nsyn_opcode ("fabsd");
11113 }
11114 }
11115
11116 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11117 insns belong to Neon, and are handled elsewhere. */
11118
11119 static void
11120 do_vfp_nsyn_ldm_stm (int is_dbmode)
11121 {
11122 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11123 if (is_ldm)
11124 {
11125 if (is_dbmode)
11126 do_vfp_nsyn_opcode ("fldmdbs");
11127 else
11128 do_vfp_nsyn_opcode ("fldmias");
11129 }
11130 else
11131 {
11132 if (is_dbmode)
11133 do_vfp_nsyn_opcode ("fstmdbs");
11134 else
11135 do_vfp_nsyn_opcode ("fstmias");
11136 }
11137 }
11138
11139 static void
11140 do_vfp_nsyn_sqrt (void)
11141 {
11142 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11143 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11144
11145 if (rs == NS_FF)
11146 do_vfp_nsyn_opcode ("fsqrts");
11147 else
11148 do_vfp_nsyn_opcode ("fsqrtd");
11149 }
11150
11151 static void
11152 do_vfp_nsyn_div (void)
11153 {
11154 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11155 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11156 N_F32 | N_F64 | N_KEY | N_VFP);
11157
11158 if (rs == NS_FFF)
11159 do_vfp_nsyn_opcode ("fdivs");
11160 else
11161 do_vfp_nsyn_opcode ("fdivd");
11162 }
11163
static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
    N_F32 | N_F64 | N_KEY | N_VFP);

  /* vnmul/vnmla/vnmls: pick the single- or double-precision encoding from
     the overload table and dispatch to the matching VFP operand encoder.  */
  if (rs == NS_FFF)
    {
      inst.instruction = NEON_ENC_SINGLE (inst.instruction);
      do_vfp_sp_dyadic ();
    }
  else
    {
      inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}
11183
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare: vcmp{e} Sd/Dd, Sm/Dm.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_monadic ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      /* Compare with zero: vcmp{e} Sd/Dd, #0.  Rewrite the mnemonic index
	 to the corresponding compare-with-zero opcode.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          inst.instruction = NEON_ENC_SINGLE (inst.instruction);
          do_vfp_sp_compare_z ();
        }
      else
        {
          inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}
11233
/* Shift the register-list operand up to slot 1 and synthesize an
   SP-with-writeback operand in slot 0, as required by the fldm/fstm forms
   used to implement vpush/vpop.  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;	/* SP.  */
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
11244
11245 static void
11246 do_vfp_nsyn_push (void)
11247 {
11248 nsyn_insert_sp ();
11249 if (inst.operands[1].issingle)
11250 do_vfp_nsyn_opcode ("fstmdbs");
11251 else
11252 do_vfp_nsyn_opcode ("fstmdbd");
11253 }
11254
11255 static void
11256 do_vfp_nsyn_pop (void)
11257 {
11258 nsyn_insert_sp ();
11259 if (inst.operands[1].issingle)
11260 do_vfp_nsyn_opcode ("fldmias");
11261 else
11262 do_vfp_nsyn_opcode ("fldmiad");
11263 }
11264
11265 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11266 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11267
11268 static unsigned
11269 neon_dp_fixup (unsigned i)
11270 {
11271 if (thumb_mode)
11272 {
11273 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11274 if (i & (1 << 24))
11275 i |= 1 << 28;
11276
11277 i &= ~(1 << 24);
11278
11279 i |= 0xef000000;
11280 }
11281 else
11282 i |= 0xf2000000;
11283
11284 return i;
11285 }
11286
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based position of the least significant set bit, so
     for the power-of-two sizes used here this is log2(x) - 3.  */
  return (unsigned) (ffs ((int) x) - 4);
}
11295
#define LOW4(R) ((R) & 0xf)		/* Low four bits of a register number.  */
#define HI1(R) (((R) >> 4) & 1)	/* Fifth (top) bit of a register number.  */
11298
11299 /* Encode insns with bit pattern:
11300
11301 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11302 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11303
11304 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11305 different meaning for some instruction. */
11306
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd in bits 15-12 with its high bit (D) at bit 22, per the diagram
     above.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn in bits 19-16 with N at bit 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm in bits 3-0 with M at bit 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* Q flag at bit 6; U at bit 24 (neon_dp_fixup moves it for Thumb).  */
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* SIZE == -1 means the opcode bitmask already fixed the size field.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11323
11324 /* Encode instructions of the form:
11325
11326 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11327 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11328
11329 Don't write size if SIZE == -1. */
11330
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd in bits 15-12 with D at bit 22 (see diagram above).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rm in bits 3-0 with M at bit 5.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  /* Q flag at bit 6; U at bit 24 (relocated for Thumb by neon_dp_fixup).  */
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* For two-register forms the size field sits at bits 19-18; SIZE == -1
     means leave it to the opcode bitmask.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11346
11347 /* Neon instruction encoders, in approximate order of appearance. */
11348
11349 static void
11350 do_neon_dyadic_i_su (void)
11351 {
11352 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11353 struct neon_type_el et = neon_check_type (3, rs,
11354 N_EQK, N_EQK, N_SU_32 | N_KEY);
11355 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11356 }
11357
11358 static void
11359 do_neon_dyadic_i64_su (void)
11360 {
11361 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11362 struct neon_type_el et = neon_check_type (3, rs,
11363 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11364 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11365 }
11366
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  /* Element size in bytes (1, 2, 4 or 8).  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  /* Shift immediate starts at bit 16.  */
  inst.instruction |= immbits << 16;
  /* Byte-size 8 (64-bit elements) sets bit 7 (the L bit); the low three
     bits of the byte size mark the element width in bits 21:19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  /* U bit only written when the caller asks for it (some immediate-shift
     forms reuse bit 24 for other purposes).  */
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11385
11386 static void
11387 do_neon_shl_imm (void)
11388 {
11389 if (!inst.operands[2].isreg)
11390 {
11391 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11392 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11393 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11394 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11395 }
11396 else
11397 {
11398 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11399 struct neon_type_el et = neon_check_type (3, rs,
11400 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11401 unsigned int tmp;
11402
11403 /* VSHL/VQSHL 3-register variants have syntax such as:
11404 vshl.xx Dd, Dm, Dn
11405 whereas other 3-register operations encoded by neon_three_same have
11406 syntax like:
11407 vadd.xx Dd, Dn, Dm
11408 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11409 here. */
11410 tmp = inst.operands[2].reg;
11411 inst.operands[2].reg = inst.operands[1].reg;
11412 inst.operands[1].reg = tmp;
11413 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11414 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11415 }
11416 }
11417
11418 static void
11419 do_neon_qshl_imm (void)
11420 {
11421 if (!inst.operands[2].isreg)
11422 {
11423 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11424 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11425
11426 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11427 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11428 inst.operands[2].imm);
11429 }
11430 else
11431 {
11432 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11433 struct neon_type_el et = neon_check_type (3, rs,
11434 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11435 unsigned int tmp;
11436
11437 /* See note in do_neon_shl_imm. */
11438 tmp = inst.operands[2].reg;
11439 inst.operands[2].reg = inst.operands[1].reg;
11440 inst.operands[1].reg = tmp;
11441 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11442 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11443 }
11444 }
11445
11446 static void
11447 do_neon_rshl (void)
11448 {
11449 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11450 struct neon_type_el et = neon_check_type (3, rs,
11451 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11452 unsigned int tmp;
11453
11454 tmp = inst.operands[2].reg;
11455 inst.operands[2].reg = inst.operands[1].reg;
11456 inst.operands[1].reg = tmp;
11457 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11458 }
11459
/* Pick the cmode encoding for a VBIC/VORR-class immediate at element size
   SIZE.  On success the 4-bit cmode value is returned and *IMMBITS holds
   the eight bits for neon_write_immbits; on failure first_error is set and
   FAIL returned.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
         FIXME is this the intended semantics? There doesn't seem much point in
         accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Single non-zero byte within a 32-bit element: cmodes 1/3/5/7
         select which byte position it occupies.  */
      if (immediate == (immediate & 0x000000ff))
        {
          *immbits = immediate;
          return 0x1;
        }
      else if (immediate == (immediate & 0x0000ff00))
        {
          *immbits = immediate >> 8;
          return 0x3;
        }
      else if (immediate == (immediate & 0x00ff0000))
        {
          *immbits = immediate >> 16;
          return 0x5;
        }
      else if (immediate == (immediate & 0xff000000))
        {
          *immbits = immediate >> 24;
          return 0x7;
        }
      /* Otherwise only encodable as a repeated 16-bit pattern: fall
         through to the 16-bit cmodes.  */
      if ((immediate & 0xffff) != (immediate >> 16))
        goto bad_immediate;
      immediate &= 0xffff;
    }

  /* Single non-zero byte within a 16-bit element: cmodes 9/0xb.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
11515
11516 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11517 A, B, C, D. */
11518
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Check each of the four bytes independently: every byte must be
     either all-zeros or all-ones.  */
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;
      if (field != 0 && field != 0xff)
        return 0;
    }

  return 1;
}
11527
11528 /* For immediate of above form, return 0bABCD. */
11529
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Collect bit 0 of each byte into the corresponding result bit,
     producing 0bABCD for an input of form AA..BB..CC..DD.  */
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
11536
11537 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11538
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* bcdefgh comes from the top of the single-precision mantissa plus the
     low exponent bit; the sign bit a lands in bit 7.  */
  unsigned low_seven = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_seven | sign_bit;
}
11544
11545 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11546 the instruction. *OP is passed as the initial value of the op field, and
11547 may be set to a different value depending on the constant (i.e.
11548 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11549 MVN). If the immediate looks like a repeated parttern then also
11550 try smaller element sizes. */
11551
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
                         int *op, int size, enum neon_el_type type)
{
  /* Quarter-precision float immediate (VMOV.F32): cmode 1111.  Only legal
     for 32-bit elements and never for the inverted (op == 1) form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where each byte is all-zeros or all-ones: cmode
         1110 with op forced to 1 (see the comment above the function).  */
      if (neon_bits_same_in_bytes (immhi)
          && neon_bits_same_in_bytes (immlo))
        {
          if (*op == 1)
            return FAIL;
          *immbits = (neon_squash_bits (immhi) << 4)
                     | neon_squash_bits (immlo);
          *op = 1;
          return 0xe;
        }

      /* A 64-bit immediate is otherwise encodable only as a repeated
         32-bit pattern; fall through to the 32-bit cases.  */
      if (immhi != immlo)
        return FAIL;
    }

  if (size >= 32)
    {
      /* Single non-zero byte within a 32-bit element: cmodes 0/2/4/6.  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x0;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0x2;
        }
      else if (immlo == (immlo & 0x00ff0000))
        {
          *immbits = immlo >> 16;
          return 0x4;
        }
      else if (immlo == (immlo & 0xff000000))
        {
          *immbits = immlo >> 24;
          return 0x6;
        }
      /* "Byte followed by ones" forms 0x00XXff / 0xXXffff: cmodes c/d.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
        {
          *immbits = (immlo >> 8) & 0xff;
          return 0xc;
        }
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
        {
          *immbits = (immlo >> 16) & 0xff;
          return 0xd;
        }

      /* Repeated 16-bit pattern: retry at the smaller element size.  */
      if ((immlo & 0xffff) != (immlo >> 16))
        return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single non-zero byte within a 16-bit element: cmodes 8/a.  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x8;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0xa;
        }

      /* Repeated 8-bit pattern: retry at 8-bit element size.  */
      if ((immlo & 0xff) != (immlo >> 8))
        return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
        return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
11648
11649 /* Write immediate bits [7:0] to the following locations:
11650
11651 |28/24|23 19|18 16|15 4|3 0|
11652 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11653
11654 This function is used by VMOV/VMVN/VORR/VBIC. */
11655
static void
neon_write_immbits (unsigned immbits)
{
  /* e f g h -> bits 3:0 (see the diagram above).  */
  inst.instruction |= immbits & 0xf;
  /* b c d -> bits 18:16.  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  /* a -> bit 24 (the 28/24 position in the diagram).  */
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}
11663
11664 /* Invert low-order SIZE bits of XHI:XLO. */
11665
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned mask;

  /* Only the Neon element sizes are meaningful here.  */
  if (size != 8 && size != 16 && size != 32 && size != 64)
    abort ();

  /* Mask covering the low word's significant bits for this size.  */
  mask = (size >= 32) ? 0xffffffff : ((1u << size) - 1);

  if (xlo)
    *xlo = ~*xlo & mask;

  /* The high word only participates for 64-bit inversion.  */
  if (xhi && size == 64)
    *xhi = ~*xhi & 0xffffffff;
}
11700
/* Encode VAND/VBIC/VORR/VORN/VEOR etc.  The register-register form goes
   through the common three-same encoder; the immediate form computes a
   cmode encoding, treating VAND/VORN as VBIC/VORR of the inverted
   immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Strip the condition field to recover the mnemonic enumerator.  */
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern.  */
          if (immbits != (inst.operands[1].regisimm ?
                          inst.operands[1].reg : 0))
            {
              /* Set immbits to an invalid constant.  */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      if (cmode == FAIL)
        return;

      /* One-register-and-immediate form: Rd plus Q, cmode and the
         immediate bits.  */
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
11777
11778 static void
11779 do_neon_bitfield (void)
11780 {
11781 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11782 neon_check_type (3, rs, N_IGNORE_TYPE);
11783 neon_three_same (neon_quad (rs), 0, -1);
11784 }
11785
11786 static void
11787 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11788 unsigned destbits)
11789 {
11790 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11791 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11792 types | N_KEY);
11793 if (et.type == NT_float)
11794 {
11795 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11796 neon_three_same (neon_quad (rs), 0, -1);
11797 }
11798 else
11799 {
11800 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11801 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11802 }
11803 }
11804
static void
do_neon_dyadic_if_su (void)
{
  /* Dyadic ops accepting signed/unsigned integer or F32 element types; the
     U bit is set for unsigned types.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
11810
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
11818
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
11826
enum vfp_or_neon_is_neon_bits
{
  /* Check (and possibly rewrite) the condition field.  */
  NEON_CHECK_CC = 1,
  /* Check that the selected CPU/FPU supports the Neon extension.  */
  NEON_CHECK_ARCH = 2
};
11832
11833 /* Call this function if an instruction which may have belonged to the VFP or
11834 Neon instruction sets, but turned out to be a Neon instruction (due to the
11835 operand types involved, etc.). We have to check and/or fix-up a couple of
11836 things:
11837
11838 - Make sure the user hasn't attempted to make a Neon instruction
11839 conditional.
11840 - Alter the value in the condition code field if necessary.
11841 - Make sure that the arch supports Neon instructions.
11842
11843 Which of these operations take place depends on bits from enum
11844 vfp_or_neon_is_neon_bits.
11845
11846 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11847 current instruction's condition is COND_ALWAYS, the condition field is
11848 changed to inst.uncond_value. This is necessary because instructions shared
11849 between VFP and Neon may be conditional for the VFP variants only, and the
11850 unconditional Neon version must have, e.g., 0xF in the condition field. */
11851
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
        {
          first_error (_(BAD_COND));
          return FAIL;
        }
      /* Force the unconditional encoding's condition-field value (see the
         function comment above).  */
      if (inst.uncond_value != -1)
        inst.instruction |= inst.uncond_value << 28;
    }

  /* The arch must provide the Neon extension for a Neon instruction.  */
  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
11876
static void
do_neon_addsub_if_i (void)
{
  /* Prefer the VFP add/sub encoding when the operands say so.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
11890
11891 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11892 result to be:
11893 V<op> A,B (A is operand 0, B is operand 2)
11894 to mean:
11895 V<op> A,B,A
11896 not:
11897 V<op> A,B,B
11898 so handle that case specially. */
11899
11900 static void
11901 neon_exchange_operands (void)
11902 {
11903 void *scratch = alloca (sizeof (inst.operands[0]));
11904 if (inst.operands[1].present)
11905 {
11906 /* Swap operands[1] and operands[2]. */
11907 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11908 inst.operands[1] = inst.operands[2];
11909 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11910 }
11911 else
11912 {
11913 inst.operands[1] = inst.operands[2];
11914 inst.operands[2] = inst.operands[0];
11915 }
11916 }
11917
/* Encode a Neon comparison.  REGTYPES/IMMTYPES constrain the types of the
   register and compare-against-immediate forms respectively; INVERT swaps
   the source operands (for LT/LE implemented via GT/GE).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against immediate (only #0 forms exist; the operand
         parser supplies the immediate).  Two-register encoding.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* F bit at bit 10 selects the float variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
11945
static void
do_neon_cmp (void)
{
  /* VCGE/VCGT-style compare: operands in natural order.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
11951
static void
do_neon_cmp_inv (void)
{
  /* VCLE/VCLT-style compare: implemented by swapping the operands of the
     GE/GT encoding.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
11957
static void
do_neon_ceq (void)
{
  /* VCEQ: equality is symmetric, so no operand swap is ever needed.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
11963
11964 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11965 scalars, which are encoded in 5 bits, M : Rm.
11966 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11967 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11968 index in M. */
11969
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3] (see the
     comment above).  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
11995
11996 /* Encode multiply / multiply-accumulate scalar instructions. */
11997
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Encode the scalar operand (register + element index) into M:Rm.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* F bit at bit 8 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12020
static void
do_neon_mac_maybe_scalar (void)
{
  /* Prefer the VFP MLA/MLS encoding when the operands say so.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* Multiply-accumulate by scalar (Dm[x]) variant.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
12045
12046 static void
12047 do_neon_tst (void)
12048 {
12049 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12050 struct neon_type_el et = neon_check_type (3, rs,
12051 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12052 neon_three_same (neon_quad (rs), 0, et.size);
12053 }
12054
12055 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12056 same types as the MAC equivalents. The polynomial type for this instruction
12057 is encoded the same as the integer type. */
12058
static void
do_neon_mul (void)
{
  /* Prefer the VFP multiply encoding when the operands say so.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* Scalar variant shares the MAC scalar encoding path.  */
    do_neon_mac_maybe_scalar ();
  else
    /* Three-register variant additionally allows the P8 polynomial type
       (encoded like the integer types; see comment above).  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
12073
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      /* VQDMULH/VQRDMULH by scalar: S16/S32 only.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* Three-register variant.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
12095
12096 static void
12097 do_neon_fcmp_absolute (void)
12098 {
12099 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12100 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12101 /* Size field comes from bit mask. */
12102 neon_three_same (neon_quad (rs), 1, -1);
12103 }
12104
static void
do_neon_fcmp_absolute_inv (void)
{
  /* VACLE/VACLT: encoded as VACGE/VACGT with the source operands
     swapped.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
12111
12112 static void
12113 do_neon_step (void)
12114 {
12115 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12116 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12117 neon_three_same (neon_quad (rs), 0, -1);
12118 }
12119
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Prefer the VFP abs/neg encoding when the operands say so.  */
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Two-register encoding; F bit at bit 10 selects the float variant.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12145
12146 static void
12147 do_neon_sli (void)
12148 {
12149 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12150 struct neon_type_el et = neon_check_type (2, rs,
12151 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12152 int imm = inst.operands[2].imm;
12153 constraint (imm < 0 || (unsigned)imm >= et.size,
12154 _("immediate out of range for insert"));
12155 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12156 }
12157
12158 static void
12159 do_neon_sri (void)
12160 {
12161 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12162 struct neon_type_el et = neon_check_type (2, rs,
12163 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12164 int imm = inst.operands[2].imm;
12165 constraint (imm < 1 || (unsigned)imm > et.size,
12166 _("immediate out of range for insert"));
12167 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12168 }
12169
static void
do_neon_qshlu_imm (void)
{
  /* VQSHLU: saturating shift left with unsigned result, signed sources
     only; shift count must be in 0..size-1.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
12186
12187 static void
12188 do_neon_qmovn (void)
12189 {
12190 struct neon_type_el et = neon_check_type (2, NS_DQ,
12191 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12192 /* Saturating move where operands can be signed or unsigned, and the
12193 destination has the same signedness. */
12194 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12195 if (et.type == NT_unsigned)
12196 inst.instruction |= 0xc0;
12197 else
12198 inst.instruction |= 0x80;
12199 neon_two_same (0, 1, et.size / 2);
12200 }
12201
12202 static void
12203 do_neon_qmovun (void)
12204 {
12205 struct neon_type_el et = neon_check_type (2, NS_DQ,
12206 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12207 /* Saturating move with unsigned results. Operands must be signed. */
12208 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12209 neon_two_same (0, 1, et.size / 2);
12210 }
12211
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* Shift count is encoded as (halved) size - count; U follows the source
     signedness.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
12238
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
12268
12269 static void
12270 do_neon_movn (void)
12271 {
12272 struct neon_type_el et = neon_check_type (2, NS_DQ,
12273 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12274 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12275 neon_two_same (0, 1, et.size / 2);
12276 }
12277
12278 static void
12279 do_neon_rshift_narrow (void)
12280 {
12281 struct neon_type_el et = neon_check_type (2, NS_DQI,
12282 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12283 int imm = inst.operands[2].imm;
12284 /* This gets the bounds check, size encoding and immediate bits calculation
12285 right. */
12286 et.size /= 2;
12287
12288 /* If immediate is zero then we are a pseudo-instruction for
12289 VMOVN.I<size> <Dd>, <Qm> */
12290 if (imm == 0)
12291 {
12292 inst.operands[2].present = 0;
12293 inst.instruction = N_MNEM_vmovn;
12294 do_neon_movn ();
12295 return;
12296 }
12297
12298 constraint (imm < 1 || (unsigned)imm > et.size,
12299 _("immediate out of range for narrowing operation"));
12300 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12301 }
12302
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant (shift == element size) has its own
         two-register encoding.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
12332
12333 /* Check the various types for the VCVT instruction, and return which version
12334 the current instruction is. */
12335
static int
neon_cvt_flavour (enum neon_shape rs)
{
/* Try the type combination (X -> Y); on the first match, clear the error
   left behind by earlier failed attempts and return flavour index C.  */
#define CVT_VAR(C,X,Y) \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (C); \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Flavours 0-3: integer <-> single conversions (Neon or VFP).  */
  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  whole_reg = N_VFP;

  /* VFP instructions.  */
  CVT_VAR (4, N_F32, N_F64);
  CVT_VAR (5, N_F64, N_F32);
  CVT_VAR (6, N_S32, N_F64 | key);
  CVT_VAR (7, N_U32, N_F64 | key);
  CVT_VAR (8, N_F64 | key, N_S32);
  CVT_VAR (9, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (10, N_F32 | key, N_S16);
  CVT_VAR (11, N_F32 | key, N_U16);
  CVT_VAR (12, N_F64 | key, N_S16);
  CVT_VAR (13, N_F64 | key, N_U16);
  CVT_VAR (14, N_S16, N_F32 | key);
  CVT_VAR (15, N_U16, N_F32 | key);
  CVT_VAR (16, N_S16, N_F64 | key);
  CVT_VAR (17, N_U16, N_F64 | key);

  /* Nothing matched: inst.error still describes the last failure.  */
  return -1;
#undef CVT_VAR
}
12382
12383 /* Neon-syntax VFP conversions. */
12384
12385 static void
12386 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12387 {
12388 const char *opname = 0;
12389
12390 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12391 {
12392 /* Conversions with immediate bitshift. */
12393 const char *enc[] =
12394 {
12395 "ftosls",
12396 "ftouls",
12397 "fsltos",
12398 "fultos",
12399 NULL,
12400 NULL,
12401 "ftosld",
12402 "ftould",
12403 "fsltod",
12404 "fultod",
12405 "fshtos",
12406 "fuhtos",
12407 "fshtod",
12408 "fuhtod",
12409 "ftoshs",
12410 "ftouhs",
12411 "ftoshd",
12412 "ftouhd"
12413 };
12414
12415 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12416 {
12417 opname = enc[flavour];
12418 constraint (inst.operands[0].reg != inst.operands[1].reg,
12419 _("operands 0 and 1 must be the same register"));
12420 inst.operands[1] = inst.operands[2];
12421 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12422 }
12423 }
12424 else
12425 {
12426 /* Conversions without bitshift. */
12427 const char *enc[] =
12428 {
12429 "ftosis",
12430 "ftouis",
12431 "fsitos",
12432 "fuitos",
12433 "fcvtsd",
12434 "fcvtds",
12435 "ftosid",
12436 "ftouid",
12437 "fsitod",
12438 "fuitod"
12439 };
12440
12441 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12442 opname = enc[flavour];
12443 }
12444
12445 if (opname)
12446 do_vfp_nsyn_opcode (opname);
12447 }
12448
12449 static void
12450 do_vfp_nsyn_cvtz (void)
12451 {
12452 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12453 int flavour = neon_cvt_flavour (rs);
12454 const char *enc[] =
12455 {
12456 "ftosizs",
12457 "ftouizs",
12458 NULL,
12459 NULL,
12460 NULL,
12461 NULL,
12462 "ftosizd",
12463 "ftouizd"
12464 };
12465
12466 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12467 do_vfp_nsyn_opcode (enc[flavour]);
12468 }
12469
12470 static void
12471 do_neon_cvt (void)
12472 {
12473 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12474 NS_FD, NS_DF, NS_FF, NS_NULL);
12475 int flavour = neon_cvt_flavour (rs);
12476
12477 /* VFP rather than Neon conversions. */
12478 if (flavour >= 4)
12479 {
12480 do_vfp_nsyn_cvt (rs, flavour);
12481 return;
12482 }
12483
12484 switch (rs)
12485 {
12486 case NS_DDI:
12487 case NS_QQI:
12488 {
12489 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12490 return;
12491
12492 /* Fixed-point conversion with #0 immediate is encoded as an
12493 integer conversion. */
12494 if (inst.operands[2].present && inst.operands[2].imm == 0)
12495 goto int_encode;
12496 unsigned immbits = 32 - inst.operands[2].imm;
12497 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12498 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12499 if (flavour != -1)
12500 inst.instruction |= enctab[flavour];
12501 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12502 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12503 inst.instruction |= LOW4 (inst.operands[1].reg);
12504 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12505 inst.instruction |= neon_quad (rs) << 6;
12506 inst.instruction |= 1 << 21;
12507 inst.instruction |= immbits << 16;
12508
12509 inst.instruction = neon_dp_fixup (inst.instruction);
12510 }
12511 break;
12512
12513 case NS_DD:
12514 case NS_QQ:
12515 int_encode:
12516 {
12517 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12518
12519 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12520
12521 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12522 return;
12523
12524 if (flavour != -1)
12525 inst.instruction |= enctab[flavour];
12526
12527 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12528 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12529 inst.instruction |= LOW4 (inst.operands[1].reg);
12530 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12531 inst.instruction |= neon_quad (rs) << 6;
12532 inst.instruction |= 2 << 18;
12533
12534 inst.instruction = neon_dp_fixup (inst.instruction);
12535 }
12536 break;
12537
12538 default:
12539 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12540 do_vfp_nsyn_cvt (rs, flavour);
12541 }
12542 }
12543
/* Encode the immediate form of VMOV/VMVN (one register and a modified
   immediate).  On entry bit 5 of the template distinguishes VMVN (1)
   from VMOV (0); if the immediate is only representable by the opposite
   instruction, the bits are inverted and the OP bit flipped.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split across imm (low half) and reg
     (high half), flagged by regisimm.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
                                            et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  /* Install the (possibly flipped) OP bit and the encoding fields.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
12593
12594 static void
12595 do_neon_mvn (void)
12596 {
12597 if (inst.operands[1].isreg)
12598 {
12599 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12600
12601 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12602 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12603 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12604 inst.instruction |= LOW4 (inst.operands[1].reg);
12605 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12606 inst.instruction |= neon_quad (rs) << 6;
12607 }
12608 else
12609 {
12610 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12611 neon_move_immediate ();
12612 }
12613
12614 inst.instruction = neon_dp_fixup (inst.instruction);
12615 }
12616
12617 /* Encode instructions of form:
12618
12619 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12620 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12621
12622 */
12623
/* Fill in the register, sign and size fields per the format diagram
   above: Rd in [15:12]/[22], Rn in [19:16]/[7], Rm in [3:0]/[5],
   unsignedness in U (bit 24) and log2 of the element size in [21:20].  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12638
12639 static void
12640 do_neon_dyadic_long (void)
12641 {
12642 /* FIXME: Type checking for lengthening op. */
12643 struct neon_type_el et = neon_check_type (3, NS_QDD,
12644 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12645 neon_mixed_length (et, et.size);
12646 }
12647
12648 static void
12649 do_neon_abal (void)
12650 {
12651 struct neon_type_el et = neon_check_type (3, NS_QDD,
12652 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12653 neon_mixed_length (et, et.size);
12654 }
12655
12656 static void
12657 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12658 {
12659 if (inst.operands[2].isscalar)
12660 {
12661 struct neon_type_el et = neon_check_type (3, NS_QDS,
12662 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12663 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12664 neon_mul_mac (et, et.type == NT_unsigned);
12665 }
12666 else
12667 {
12668 struct neon_type_el et = neon_check_type (3, NS_QDD,
12669 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12670 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12671 neon_mixed_length (et, et.size);
12672 }
12673 }
12674
/* Encode VMLAL/VMLSL/VMULL-style long multiply(-accumulate), by vector
   or by scalar; scalar operands are restricted to 16/32-bit elements.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
12680
12681 static void
12682 do_neon_dyadic_wide (void)
12683 {
12684 struct neon_type_el et = neon_check_type (3, NS_QQD,
12685 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12686 neon_mixed_length (et, et.size);
12687 }
12688
12689 static void
12690 do_neon_dyadic_narrow (void)
12691 {
12692 struct neon_type_el et = neon_check_type (3, NS_QDD,
12693 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12694 /* Operand sign is unimportant, and the U bit is part of the opcode,
12695 so force the operand type to integer. */
12696 et.type = NT_integer;
12697 neon_mixed_length (et, et.size / 2);
12698 }
12699
/* Encode VQDMULL/VQDMLAL/VQDMLSL-style saturating long multiplies:
   signed 16/32-bit elements only, by vector or by scalar.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
12705
12706 static void
12707 do_neon_vmull (void)
12708 {
12709 if (inst.operands[2].isscalar)
12710 do_neon_mac_maybe_scalar_long ();
12711 else
12712 {
12713 struct neon_type_el et = neon_check_type (3, NS_QDD,
12714 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12715 if (et.type == NT_poly)
12716 inst.instruction = NEON_ENC_POLY (inst.instruction);
12717 else
12718 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12719 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12720 zero. Should be OK as-is. */
12721 neon_mixed_length (et, et.size);
12722 }
12723 }
12724
/* Encode VEXT: extract a window from the concatenation of two vectors.
   The index operand is given in elements and is scaled to the byte
   offset the imm4 field requires.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12743
/* Encode VREV16/VREV32/VREV64 (shared handler; bits [8:7] of the
   template distinguish the three).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* op is 2 for VREV16, 1 for VREV32, 0 for VREV64.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
12760
/* Encode VDUP, either from a vector scalar (VDUP.<size> <Qd|Dd>, <Dm[x]>)
   or from an ARM core register (VDUP.<size> <Qd|Dd>, <Rm>).  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      /* imm4 is the index shifted left past a low set bit whose position
         encodes the element size.  */
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      /* The b/e bits select the element size.  */
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
12811
12812 /* VMOV has particularly many variations. It can be one of:
12813 0. VMOV<c><q> <Qd>, <Qm>
12814 1. VMOV<c><q> <Dd>, <Dm>
12815 (Register operations, which are VORR with Rm = Rn.)
12816 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12817 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12818 (Immediate loads.)
12819 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12820 (ARM register to scalar.)
12821 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12822 (Two ARM registers to vector.)
12823 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12824 (Scalar to ARM register.)
12825 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12826 (Vector to two ARM registers.)
12827 8. VMOV.F32 <Sd>, <Sm>
12828 9. VMOV.F64 <Dd>, <Dm>
12829 (VFP register moves.)
12830 10. VMOV.F32 <Sd>, #imm
12831 11. VMOV.F64 <Dd>, #imm
12832 (VFP float immediate load.)
12833 12. VMOV <Rd>, <Sm>
12834 (VFP single to ARM reg.)
12835 13. VMOV <Sd>, <Rm>
12836 (ARM reg to VFP single.)
12837 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12838 (Two ARM regs to two VFP singles.)
12839 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12840 (Two VFP singles to two ARM regs.)
12841
12842 These cases can be disambiguated using neon_select_shape, except cases 1/9
12843 and 3/11 which depend on the operand type too.
12844
12845 All the encoded bits are hardcoded by this function.
12846
12847 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12848 Cases 5, 7 may be used with VFPv2 and above.
12849
12850 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12851 can specify a type where it doesn't make sense to, and is ignored).
12852 */
12853
/* Disambiguate and encode the many VMOV variants listed in the comment
   above; the case numbers in the comments below refer to that list.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 9: F64 register move is the VFP fcpyd.  */
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that.  */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        /* VORR with Rn == Rm: duplicate Rm into the Rn fields.  */
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd).  */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    case NS_SR:  /* case 4.  */
      {
        unsigned bcdebits = 0;
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_8 | N_16 | N_32 | N_KEY, N_EQK);
        int logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        /* Only 32-bit transfers before Neon (VFPv1 and up).  */
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Size-selector bit pattern; the scalar index is merged in
           above the size bits.  */
        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        unsigned logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Size and signedness selector; signed narrow types get U = 0.  */
        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit "quarter-precision"
         format can be encoded.  */
      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
13060
13061 static void
13062 do_neon_rshift_round_imm (void)
13063 {
13064 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13065 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13066 int imm = inst.operands[2].imm;
13067
13068 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13069 if (imm == 0)
13070 {
13071 inst.operands[2].present = 0;
13072 do_neon_mov ();
13073 return;
13074 }
13075
13076 constraint (imm < 1 || (unsigned)imm > et.size,
13077 _("immediate out of range for shift"));
13078 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13079 et.size - imm);
13080 }
13081
13082 static void
13083 do_neon_movl (void)
13084 {
13085 struct neon_type_el et = neon_check_type (2, NS_QD,
13086 N_EQK | N_DBL, N_SU_32 | N_KEY);
13087 unsigned sizebits = et.size >> 3;
13088 inst.instruction |= sizebits << 19;
13089 neon_two_same (0, et.type == NT_unsigned, -1);
13090 }
13091
13092 static void
13093 do_neon_trn (void)
13094 {
13095 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13096 struct neon_type_el et = neon_check_type (2, rs,
13097 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13098 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13099 neon_two_same (neon_quad (rs), 1, et.size);
13100 }
13101
13102 static void
13103 do_neon_zip_uzp (void)
13104 {
13105 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13106 struct neon_type_el et = neon_check_type (2, rs,
13107 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13108 if (rs == NS_DD && et.size == 32)
13109 {
13110 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13111 inst.instruction = N_MNEM_vtrn;
13112 do_neon_trn ();
13113 return;
13114 }
13115 neon_two_same (neon_quad (rs), 1, et.size);
13116 }
13117
13118 static void
13119 do_neon_sat_abs_neg (void)
13120 {
13121 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13122 struct neon_type_el et = neon_check_type (2, rs,
13123 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13124 neon_two_same (neon_quad (rs), 1, et.size);
13125 }
13126
13127 static void
13128 do_neon_pair_long (void)
13129 {
13130 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13131 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13132 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
13133 inst.instruction |= (et.type == NT_unsigned) << 7;
13134 neon_two_same (neon_quad (rs), 1, et.size);
13135 }
13136
13137 static void
13138 do_neon_recip_est (void)
13139 {
13140 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13141 struct neon_type_el et = neon_check_type (2, rs,
13142 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13143 inst.instruction |= (et.type == NT_float) << 8;
13144 neon_two_same (neon_quad (rs), 1, et.size);
13145 }
13146
13147 static void
13148 do_neon_cls (void)
13149 {
13150 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13151 struct neon_type_el et = neon_check_type (2, rs,
13152 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13153 neon_two_same (neon_quad (rs), 1, et.size);
13154 }
13155
13156 static void
13157 do_neon_clz (void)
13158 {
13159 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13160 struct neon_type_el et = neon_check_type (2, rs,
13161 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13162 neon_two_same (neon_quad (rs), 1, et.size);
13163 }
13164
13165 static void
13166 do_neon_cnt (void)
13167 {
13168 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13169 struct neon_type_el et = neon_check_type (2, rs,
13170 N_EQK | N_INT, N_8 | N_KEY);
13171 neon_two_same (neon_quad (rs), 1, et.size);
13172 }
13173
13174 static void
13175 do_neon_swp (void)
13176 {
13177 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13178 neon_two_same (neon_quad (rs), 1, -1);
13179 }
13180
/* Encode VTBL/VTBX: table lookup using a list of one to four contiguous
   D registers as the table.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* The list length is encoded biased by one in bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
13204
/* Encode VLDM/VSTM for double-precision register lists; single-precision
   lists are handed off to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
13234
13235 static void
13236 do_neon_ldr_str (void)
13237 {
13238 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13239
13240 if (inst.operands[0].issingle)
13241 {
13242 if (is_ldr)
13243 do_vfp_nsyn_opcode ("flds");
13244 else
13245 do_vfp_nsyn_opcode ("fsts");
13246 }
13247 else
13248 {
13249 if (is_ldr)
13250 do_vfp_nsyn_opcode ("fldd");
13251 else
13252 do_vfp_nsyn_opcode ("fstd");
13253 }
13254 }
13255
13256 /* "interleave" version also handles non-interleaving register VLD1/VST1
13257 instructions. */
13258
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate the bit alignment (stored shifted left by 8 in the operand)
     into the two-bit align field; three-register lists cannot take
     128- or 256-bit alignment.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
13321
13322 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13323 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13324 otherwise. The variable arguments are a list of pairs of legal (size, align)
13325 values, terminated with -1. */
13326
13327 static int
13328 neon_alignment_bit (int size, int align, int *do_align, ...)
13329 {
13330 va_list ap;
13331 int result = FAIL, thissize, thisalign;
13332
13333 if (!inst.operands[1].immisalign)
13334 {
13335 *do_align = 0;
13336 return SUCCESS;
13337 }
13338
13339 va_start (ap, do_align);
13340
13341 do
13342 {
13343 thissize = va_arg (ap, int);
13344 if (thissize == -1)
13345 break;
13346 thisalign = va_arg (ap, int);
13347
13348 if (size == thissize && align == thisalign)
13349 result = SUCCESS;
13350 }
13351 while (result != SUCCESS);
13352
13353 va_end (ap);
13354
13355 if (result == SUCCESS)
13356 *do_align = 1;
13357 else
13358 first_error (_("unsupported alignment for instruction"));
13359
13360 return result;
13361 }
13362
/* Encode the single-lane forms of VLD<n>/VST<n> (one element transferred
   to/from one lane of each register in the list).  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  /* log2 of the element size (0, 1 or 2).  */
  int logsize = neon_logbits (et.size);
  /* Alignment qualifier in bits; only meaningful when
     inst.operands[1].immisalign is set.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of elements that fit in one 64-bit (D) register.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, align) pairs and its own
     encoding of the alignment bits in [5:4].  */
  switch (n)
    {
    case 0: /* VLD1 / VST1. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3. */
      /* Single-lane VLD3/VST3 permits no alignment qualifier at all.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4. */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8: alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    /* 32-bit elements accept either 64- or 128-bit alignment,
	       encoded differently.  */
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index goes above the size-dependent stride bit; element size
     goes in bits [11:10].  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
13447
/* Encode single n-element structure to all lanes VLD<n> instructions. */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1. */
      /* Stride 2 should never reach here for VLD1 — presumably rejected
	 earlier by the operand parser.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 distinguishes the one- and two-register forms.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2. */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3. */
      /* All-lanes VLD3 permits no alignment qualifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4. */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the otherwise-unused
	   size encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Bit 4 flags that an alignment qualifier was present.  */
  inst.instruction |= do_align << 4;
}
13521
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]). */
13524
static void
do_neon_ldx_stx (void)
{
  /* The lane field of operand 0 distinguishes the three families of
     element/structure load-store; each has its own encoder for
     bits [11:4].  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask. */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Addressing mode lives in the Rm field: a real register for
     register post-index, 0xd for writeback, 0xf for none (the latter
     two being reserved register numbers, hence the constraint).  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  /* Top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
13571
13572 \f
13573 /* Overall per-instruction processing. */
13574
13575 /* We need to be able to fix up arbitrary expressions in some statements.
13576 This is so that we can handle symbols that are an arbitrary distance from
13577 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13578 which returns part of an address in a form which will be valid for
13579 a data instruction. We do this by pushing the expression into a symbol
13580 in the expr_section, and creating a fix for that. */
13581
13582 static void
13583 fix_new_arm (fragS * frag,
13584 int where,
13585 short int size,
13586 expressionS * exp,
13587 int pc_rel,
13588 int reloc)
13589 {
13590 fixS * new_fix;
13591
13592 switch (exp->X_op)
13593 {
13594 case O_constant:
13595 case O_symbol:
13596 case O_add:
13597 case O_subtract:
13598 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13599 break;
13600
13601 default:
13602 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13603 pc_rel, reloc);
13604 break;
13605 }
13606
13607 /* Mark whether the fix is to a THUMB instruction, or an ARM
13608 instruction. */
13609 new_fix->tc_fix_data = thumb_mode;
13610 }
13611
/* Create a frag for an instruction requiring relaxation. */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction. */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol + addend form that
     frag_var expects; arbitrary expressions get an expression symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Reserve room for the worst case (INSN_SIZE) but only emit the
     16-bit form now; relaxation may grow it later.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
13643
13644 /* Write a 32-bit thumb instruction to buf. */
13645 static void
13646 put_thumb32_insn (char * buf, unsigned long insn)
13647 {
13648 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13649 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13650 }
13651
13652 static void
13653 output_inst (const char * str)
13654 {
13655 char * to = NULL;
13656
13657 if (inst.error)
13658 {
13659 as_bad ("%s -- `%s'", inst.error, str);
13660 return;
13661 }
13662 if (inst.relax) {
13663 output_relax_insn();
13664 return;
13665 }
13666 if (inst.size == 0)
13667 return;
13668
13669 to = frag_more (inst.size);
13670
13671 if (thumb_mode && (inst.size > THUMB_SIZE))
13672 {
13673 assert (inst.size == (2 * THUMB_SIZE));
13674 put_thumb32_insn (to, inst.instruction);
13675 }
13676 else if (inst.size > INSN_SIZE)
13677 {
13678 assert (inst.size == (2 * INSN_SIZE));
13679 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13680 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13681 }
13682 else
13683 md_number_to_chars (to, inst.instruction, inst.size);
13684
13685 if (inst.reloc.type != BFD_RELOC_UNUSED)
13686 fix_new_arm (frag_now, to - frag_now->fr_literal,
13687 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13688 inst.reloc.type);
13689
13690 dwarf2_emit_insn (inst.size);
13691 }
13692
/* Tag values used in struct asm_opcode's tag field.  They describe how a
   condition code may be attached to each mnemonic (as a suffix, as an
   infix, or not at all); opcode_lookup below keys off these values.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE. */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field. */
  OT_csuffix,		/* Instruction takes a conditional suffix. */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be. */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.) */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous. */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3. */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix. */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode. */
};
13726
13727 /* Subroutine of md_assemble, responsible for looking up the primary
13728 opcode from the mnemonic the user wrote. STR points to the
13729 beginning of the mnemonic.
13730
13731 This is not simply a hash table lookup, because of conditional
13732 variants. Most instructions have conditional variants, which are
13733 expressed with a _conditional affix_ to the mnemonic. If we were
13734 to encode each conditional variant as a literal string in the opcode
13735 table, it would have approximately 20,000 entries.
13736
13737 Most mnemonics take this affix as a suffix, and in unified syntax,
13738 'most' is upgraded to 'all'. However, in the divided syntax, some
13739 instructions take the affix as an infix, notably the s-variants of
13740 the arithmetic instructions. Of those instructions, all but six
13741 have the infix appear after the third character of the mnemonic.
13742
13743 Accordingly, the algorithm for looking up primary opcodes given
13744 an identifier is:
13745
13746 1. Look up the identifier in the opcode table.
13747 If we find a match, go to step U.
13748
13749 2. Look up the last two characters of the identifier in the
13750 conditions table. If we find a match, look up the first N-2
13751 characters of the identifier in the opcode table. If we
13752 find a match, go to step CE.
13753
13754 3. Look up the fourth and fifth characters of the identifier in
13755 the conditions table. If we find a match, extract those
13756 characters from the identifier, and look up the remaining
13757 characters in the opcode table. If we find a match, go
13758 to step CM.
13759
13760 4. Fail.
13761
13762 U. Examine the tag field of the opcode structure, in case this is
13763 one of the six instructions with its conditional infix in an
13764 unusual place. If it is, the tag tells us where to find the
13765 infix; look it up in the conditions table and set inst.cond
13766 accordingly. Otherwise, this is an unconditional instruction.
13767 Again set inst.cond accordingly. Return the opcode structure.
13768
13769 CE. Examine the tag field to make sure this is an instruction that
13770 should receive a conditional suffix. If it is not, fail.
13771 Otherwise, set inst.cond from the suffix we already looked up,
13772 and return the opcode structure.
13773
13774 CM. Examine the tag field to make sure this is an instruction that
13775 should receive a conditional infix after the third character.
13776 If it is not, fail. Otherwise, undo the edits to the current
13777 line of input and proceed as for case CE. */
13778
/* Look up the opcode for the mnemonic at *STR, following the algorithm
   described in the comment above.  On success, advances *STR past the
   mnemonic and any width/Neon-type suffix, sets inst.cond (and possibly
   inst.size_req / inst.vectype / inst.error), and returns the opcode
   entry.  Returns 0 on failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string. */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix. */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use. */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
             non-unified ARM syntax mode). */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
            return 0;
        }
      else if (end[offset] != '\0' && end[offset] != ' ')
        return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic. */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Not one of the six oddball infix mnemonics: as written, the
	     instruction is unconditional.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* The tag itself encodes where the infix sits within the mnemonic.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  (The identifier must be at least three characters long:
     a two-character condition suffix plus at least one base character.)  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic. */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics. */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  /* Infix-style mnemonics only accept suffix form in unified
	     syntax.  */
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    {
	      inst.cond = cond->value;
	    }
	  else
	    {
	      /* delayed diagnostic */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return 0;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix). */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position. */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  /* Temporarily remove the two infix characters from the input line,
     retry the opcode lookup, then restore the line intact.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
13937
13938 void
13939 md_assemble (char *str)
13940 {
13941 char *p = str;
13942 const struct asm_opcode * opcode;
13943
13944 /* Align the previous label if needed. */
13945 if (last_label_seen != NULL)
13946 {
13947 symbol_set_frag (last_label_seen, frag_now);
13948 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
13949 S_SET_SEGMENT (last_label_seen, now_seg);
13950 }
13951
13952 memset (&inst, '\0', sizeof (inst));
13953 inst.reloc.type = BFD_RELOC_UNUSED;
13954
13955 opcode = opcode_lookup (&p);
13956 if (!opcode)
13957 {
13958 /* It wasn't an instruction, but it might be a register alias of
13959 the form alias .req reg, or a Neon .dn/.qn directive. */
13960 if (!create_register_alias (str, p)
13961 && !create_neon_reg_alias (str, p))
13962 as_bad (_("bad instruction `%s'"), str);
13963
13964 return;
13965 }
13966
13967 if (opcode->tag == OT_cinfix3_deprecated)
13968 as_warn (_("s suffix on comparison instruction is deprecated"));
13969
13970 /* The value which unconditional instructions should have in place of the
13971 condition field. */
13972 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
13973
13974 if (thumb_mode)
13975 {
13976 arm_feature_set variant;
13977
13978 variant = cpu_variant;
13979 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13980 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
13981 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
13982 /* Check that this instruction is supported for this CPU. */
13983 if (!opcode->tvariant
13984 || (thumb_mode == 1
13985 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
13986 {
13987 as_bad (_("selected processor does not support `%s'"), str);
13988 return;
13989 }
13990 if (inst.cond != COND_ALWAYS && !unified_syntax
13991 && opcode->tencode != do_t_branch)
13992 {
13993 as_bad (_("Thumb does not support conditional execution"));
13994 return;
13995 }
13996
13997 /* Check conditional suffixes. */
13998 if (current_it_mask)
13999 {
14000 int cond;
14001 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14002 current_it_mask <<= 1;
14003 current_it_mask &= 0x1f;
14004 /* The BKPT instruction is unconditional even in an IT block. */
14005 if (!inst.error
14006 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14007 {
14008 as_bad (_("incorrect condition in IT block"));
14009 return;
14010 }
14011 }
14012 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14013 {
14014 as_bad (_("thumb conditional instrunction not in IT block"));
14015 return;
14016 }
14017
14018 mapping_state (MAP_THUMB);
14019 inst.instruction = opcode->tvalue;
14020
14021 if (!parse_operands (p, opcode->operands))
14022 opcode->tencode ();
14023
14024 /* Clear current_it_mask at the end of an IT block. */
14025 if (current_it_mask == 0x10)
14026 current_it_mask = 0;
14027
14028 if (!(inst.error || inst.relax))
14029 {
14030 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14031 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14032 if (inst.size_req && inst.size_req != inst.size)
14033 {
14034 as_bad (_("cannot honor width suffix -- `%s'"), str);
14035 return;
14036 }
14037 }
14038 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14039 *opcode->tvariant);
14040 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14041 set those bits when Thumb-2 32-bit instructions are seen. ie.
14042 anything other than bl/blx.
14043 This is overly pessimistic for relaxable instructions. */
14044 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14045 || inst.relax)
14046 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14047 arm_ext_v6t2);
14048 }
14049 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14050 {
14051 /* Check that this instruction is supported for this CPU. */
14052 if (!opcode->avariant ||
14053 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14054 {
14055 as_bad (_("selected processor does not support `%s'"), str);
14056 return;
14057 }
14058 if (inst.size_req)
14059 {
14060 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14061 return;
14062 }
14063
14064 mapping_state (MAP_ARM);
14065 inst.instruction = opcode->avalue;
14066 if (opcode->tag == OT_unconditionalF)
14067 inst.instruction |= 0xF << 28;
14068 else
14069 inst.instruction |= inst.cond << 28;
14070 inst.size = INSN_SIZE;
14071 if (!parse_operands (p, opcode->operands))
14072 opcode->aencode ();
14073 /* Arm mode bx is marked as both v4T and v5 because it's still required
14074 on a hypothetical non-thumb v5 core. */
14075 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14076 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14077 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14078 else
14079 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14080 *opcode->avariant);
14081 }
14082 else
14083 {
14084 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14085 "-- `%s'"), str);
14086 return;
14087 }
14088 output_inst (str);
14089 }
14090
14091 /* Various frobbings of labels and their addresses. */
14092
/* Hook run at the start of each new input line: forget the label from
   the previous line so md_assemble only re-anchors labels seen on the
   current one.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
14098
/* Hook run whenever a label SYM is defined: record it, tag it with the
   current Thumb/interwork state, and mark Thumb function entry points.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Record whether the label was defined in Thumb or ARM code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labeled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* Consume the flag: it applies only to the next label.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
14155
14156 int
14157 arm_data_in_code (void)
14158 {
14159 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14160 {
14161 *input_line_pointer = '/';
14162 input_line_pointer += 5;
14163 *input_line_pointer = 0;
14164 return 1;
14165 }
14166
14167 return 0;
14168 }
14169
14170 char *
14171 arm_canonicalize_symbol_name (char * name)
14172 {
14173 int len;
14174
14175 if (thumb_mode && (len = strlen (name)) > 5
14176 && streq (name + len - 5, "/data"))
14177 *(name + len - 5) = 0;
14178
14179 return name;
14180 }
14181 \f
/* Table of all register names defined by default. The user can
   define additional names with .req. Note that all register names
   should appear in both upper and lowercase variants. Some registers
   also have mixed-case names. */

/* REGDEF builds one reg_entry; REGNUM appends the number to the prefix;
   REGNUM2 maps name p<n> to register number 2*n (used for Q registers,
   which alias pairs of D registers).  REGSET/REGSETH/REGSET2 expand to
   16 consecutive entries each.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers. */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms. */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases. */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers. */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers. The "cr" variants are for backward
     compatibility. */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers. */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers. */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers. */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* Note: REGNUM2, REGSET2 and REGSETH remain defined past this point.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
14291
14292 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14293 within psr_required_here. */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  Every ordering of every subset of
     {f,s,x,c} is listed explicitly, so the field-mask letters in an
     MSR operand such as "cpsr_fsxc" may be written in any order and
     the lookup still succeeds.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
14369
/* Table of V7M psr names.  The value is the special-register number
   placed in the Thumb-2 MRS/MSR encoding for that register.
   NOTE(review): these appear to be the ARMv7-M "SYSm" encodings
   (APSR=0 ... CONTROL=20) -- confirm against the ARMv7-M ARM.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 },
  {"iapsr",	  1 },
  {"eapsr",	  2 },
  {"psr",	  3 },
  {"ipsr",	  5 },
  {"epsr",	  6 },
  {"iepsr",	  7 },
  {"msp",	  8 },
  {"psp",	  9 },
  {"primask",	 16},
  {"basepri",	 17},
  {"basepri_max", 18},
  {"faultmask",	 19},
  {"control",	 20}
};
14388
/* Table of all shift-in-operand names, in both lower and upper case.
   "asl" is accepted as a synonym for "lsl" (arithmetic and logical
   left shift are the same operation).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
14399
/* Table of all explicit relocation names.  Each name (accepted in both
   lower and upper case) maps to the BFD relocation code to emit.
   ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
14417
/* Table of all conditional affixes.  The value is the 4-bit condition
   field of the instruction encoding.  0xF is not defined as a
   condition code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  /* "hs" (higher-or-same) is a synonym for "cs" (carry set);
     "ul" and "lo" (unsigned lower) are synonyms for "cc" (carry clear).  */
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
14437
/* Table of memory-barrier option names.  The value looks like the
   4-bit option field of the barrier instruction encoding.
   NOTE(review): presumably "sy" = full system, "st" = stores only,
   "un"/"unst" = the unshared-domain variants -- confirm against the
   ARM ARM barrier option encodings.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
14445
14446 /* Table of ARM-format instructions. */
14447
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   These are invoked by token-pasting OPS##nops in the mnemonic-table
   macros below, so nops must match the number of operands listed.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14458
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Common arguments:
     mnem  - the mnemonic, written as a bare identifier (stringized
	     here with #mnem).
     op    - the ARM opcode, written as hex digits WITHOUT the 0x
	     prefix (the macros paste 0x## in front).
     top   - the Thumb opcode; either bare hex digits (upper-case
	     TCE/TC3 forms) or a T_MNEM_xyz enumerator suffix
	     (lower-case tCE/tC3 forms).
     nops  - the operand count, pasted onto OPS above; ops is the
	     parenthesized operand list.
     ae/te - suffixes of the ARM and Thumb encoding functions,
	     pasted onto do_.
   ARM_VARIANT / THUMB_VARIANT are #defined (and re-#defined) around
   the insns[] table to select the required architecture extension.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but tagged OT_cinfix3_deprecated: the infixed spelling is
   obsolete (the deprecation handling itself lives elsewhere).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.
   m1 is the mnemonic text before the condition, m2 the condition
   itself (possibly empty), m3 the text after it.  sizeof(#m2) == 1
   detects the empty-condition case: an empty m2 stringizes to "",
   whose sizeof is 1 (just the NUL).  Otherwise the infix position is
   recorded as OT_odd_infix_0 plus the length of m1.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expands to one table entry per condition code (plus the bare,
   unconditional spelling).  Note the synonym conditions hs/cc/ul/lo
   are all present, matching the conds[] table above.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
      TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14522
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  The Thumb opcode, variant and
   encoder slots are zero/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.
   The Thumb opcode is the ARM opcode with an 0xe condition nibble
   pasted in front, and both modes share one encoder.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14565
/* ARM-only odd-infix mnemonic: same empty-condition trick as TxCM_
   above (sizeof(#m2) == 1 means m2 stringized to "", i.e. no
   condition), but with no Thumb variant.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only counterpart of TxCM: one entry per condition code plus the
   bare spelling; condition synonyms included to match conds[].  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonic (condition field 0xE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only unconditional mnemonic bearing 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14597
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  The opcode slots carry
   an N_MNEM_xyz enumerator rather than encoding bits.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The tag argument distinguishes plain suffix (NCE) from the
   F-flavoured suffix (NCEF).  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Dummy encoder name: do_##0 expands to do_0, defined here as 0 (NULL)
   so table entries with no encoder function compile cleanly.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE(mnem,  0, op, nops, ops, 0, te)
14638
14639 static const struct asm_opcode insns[] =
14640 {
14641 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14642 #define THUMB_VARIANT &arm_ext_v4t
14643 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14644 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14645 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14646 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14647 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14648 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14649 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14650 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14651 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14652 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14653 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14654 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14655 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14656 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14657 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14658 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14659
14660 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14661 for setting PSR flag bits. They are obsolete in V6 and do not
14662 have Thumb equivalents. */
14663 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14664 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14665 CL(tstp, 110f000, 2, (RR, SH), cmp),
14666 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14667 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14668 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14669 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14670 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14671 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14672
14673 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14674 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14675 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14676 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14677
14678 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14679 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14680 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14681 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14682
14683 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14684 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14685 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14686 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14687 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14688 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14689
14690 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14691 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14692 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14693 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14694
14695 /* Pseudo ops. */
14696 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14697 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14698 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14699
14700 /* Thumb-compatibility pseudo ops. */
14701 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14702 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14703 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14704 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14705 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14706 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14707 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14708 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14709 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14710 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14711 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14712 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14713
14714 #undef THUMB_VARIANT
14715 #define THUMB_VARIANT &arm_ext_v6
14716 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14717
14718 /* V1 instructions with no Thumb analogue prior to V6T2. */
14719 #undef THUMB_VARIANT
14720 #define THUMB_VARIANT &arm_ext_v6t2
14721 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14722 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14723 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14724 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14725 CL(teqp, 130f000, 2, (RR, SH), cmp),
14726
14727 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14728 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14729 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14730 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14731
14732 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14733 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14734
14735 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14736 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14737
14738 /* V1 instructions with no Thumb analogue at all. */
14739 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14740 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14741
14742 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14743 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14744 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14745 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14746 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14747 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14748 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14749 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14750
14751 #undef ARM_VARIANT
14752 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14753 #undef THUMB_VARIANT
14754 #define THUMB_VARIANT &arm_ext_v4t
14755 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14756 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14757
14758 #undef THUMB_VARIANT
14759 #define THUMB_VARIANT &arm_ext_v6t2
14760 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14761 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14762
14763 /* Generic coprocessor instructions. */
14764 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14765 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14766 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14767 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14768 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14769 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14770 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14771
14772 #undef ARM_VARIANT
14773 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14774 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14775 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14776
14777 #undef ARM_VARIANT
14778 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14779 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14780 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14781
14782 #undef ARM_VARIANT
14783 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14784 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14785 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14786 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14787 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14788 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14789 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14790 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14791 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14792
14793 #undef ARM_VARIANT
14794 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14795 #undef THUMB_VARIANT
14796 #define THUMB_VARIANT &arm_ext_v4t
14797 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14798 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14799 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14800 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14801 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14802 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14803
14804 #undef ARM_VARIANT
14805 #define ARM_VARIANT &arm_ext_v4t_5
14806 /* ARM Architecture 4T. */
14807 /* Note: bx (and blx) are required on V5, even if the processor does
14808 not support Thumb. */
14809 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14810
14811 #undef ARM_VARIANT
14812 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14813 #undef THUMB_VARIANT
14814 #define THUMB_VARIANT &arm_ext_v5t
14815 /* Note: blx has 2 variants; the .value coded here is for
14816 BLX(2). Only this variant has conditional execution. */
14817 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14818 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14819
14820 #undef THUMB_VARIANT
14821 #define THUMB_VARIANT &arm_ext_v6t2
14822 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14823 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14824 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14825 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14826 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14827 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14828 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14829 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14830
14831 #undef ARM_VARIANT
14832 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14833 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14834 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14835 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14836 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14837
14838 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14839 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14840
14841 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14842 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14843 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14844 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14845
14846 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14847 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14848 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14849 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14850
14851 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14852 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14853
14854 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14855 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14856 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14857 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14858
14859 #undef ARM_VARIANT
14860 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14861 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14862 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14863 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14864
14865 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14866 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14867
14868 #undef ARM_VARIANT
14869 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14870 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14871
14872 #undef ARM_VARIANT
14873 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14874 #undef THUMB_VARIANT
14875 #define THUMB_VARIANT &arm_ext_v6
14876 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14877 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14878 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14879 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14880 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14881 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14882 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14883 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14884 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14885 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14886
14887 #undef THUMB_VARIANT
14888 #define THUMB_VARIANT &arm_ext_v6t2
14889 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14890 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14891 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14892
14893 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14894 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14895
14896 /* ARM V6 not included in V7M (eg. integer SIMD). */
14897 #undef THUMB_VARIANT
14898 #define THUMB_VARIANT &arm_ext_v6_notm
14899 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14900 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14901 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14902 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14903 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14904 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14905 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14906 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14907 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14908 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14909 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14910 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14911 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14912 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14913 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14914 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14915 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14916 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14917 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14918 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14919 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14920 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14921 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14922 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14923 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14924 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14925 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14926 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14927 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14928 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14929 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14930 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14931 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14932 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14933 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14934 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14935 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14936 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14937 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14938 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14939 UF(rfeib, 9900a00, 1, (RRw), rfe),
14940 UF(rfeda, 8100a00, 1, (RRw), rfe),
14941 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14942 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14943 UF(rfefa, 9900a00, 1, (RRw), rfe),
14944 UF(rfeea, 8100a00, 1, (RRw), rfe),
14945 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14946 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14947 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14948 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14949 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14950 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14951 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14952 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14953 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14954 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14955 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14956 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14957 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14958 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14959 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14960 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14961 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14962 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14963 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14964 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14965 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14966 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14967 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14968 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14969 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14970 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14971 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14972 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14973 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
14974 UF(srsib, 9cd0500, 1, (I31w), srs),
14975 UF(srsda, 84d0500, 1, (I31w), srs),
14976 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
14977 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
14978 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
14979 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
14980 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14981 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14982 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
14983
14984 #undef ARM_VARIANT
14985 #define ARM_VARIANT &arm_ext_v6k
14986 #undef THUMB_VARIANT
14987 #define THUMB_VARIANT &arm_ext_v6k
14988 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
14989 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
14990 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
14991 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
14992
14993 #undef THUMB_VARIANT
14994 #define THUMB_VARIANT &arm_ext_v6_notm
14995 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
14996 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
14997
14998 #undef THUMB_VARIANT
14999 #define THUMB_VARIANT &arm_ext_v6t2
15000 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15001 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15002 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15003 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15004 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15005
15006 #undef ARM_VARIANT
15007 #define ARM_VARIANT &arm_ext_v6z
15008 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15009
15010 #undef ARM_VARIANT
15011 #define ARM_VARIANT &arm_ext_v6t2
15012 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15013 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15014 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15015 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15016
15017 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15018 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15019 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15020 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15021
15022 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15023 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15024 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15025 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15026
15027 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15028 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15029 /* ARM does not really have an IT instruction, so always allow it. */
15030 #undef ARM_VARIANT
15031 #define ARM_VARIANT &arm_ext_v1
15032 TUE(it, 0, bf08, 1, (COND), it, t_it),
15033 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15034 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15035 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15036 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15037 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15038 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15039 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15040 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15041 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15042 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15043 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15044 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15045 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15046 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15047
15048 /* Thumb2 only instructions. */
15049 #undef ARM_VARIANT
15050 #define ARM_VARIANT NULL
15051
15052 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15053 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15054 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15055 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15056
15057 /* Thumb-2 hardware division instructions (R and M profiles only). */
15058 #undef THUMB_VARIANT
15059 #define THUMB_VARIANT &arm_ext_div
15060 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15061 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15062
15063 /* ARM V7 instructions. */
15064 #undef ARM_VARIANT
15065 #define ARM_VARIANT &arm_ext_v7
15066 #undef THUMB_VARIANT
15067 #define THUMB_VARIANT &arm_ext_v7
15068 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15069 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15070 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15071 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15072 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15073
15074 #undef ARM_VARIANT
15075 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15076 cCE(wfs, e200110, 1, (RR), rd),
15077 cCE(rfs, e300110, 1, (RR), rd),
15078 cCE(wfc, e400110, 1, (RR), rd),
15079 cCE(rfc, e500110, 1, (RR), rd),
15080
15081 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15082 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15083 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15084 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15085
15086 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15087 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15088 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15089 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15090
15091 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15092 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15093 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15094 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15095 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15096 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15097 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15098 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15099 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15100 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15101 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15102 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15103
15104 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15105 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15106 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15107 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15108 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15109 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15110 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15111 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15112 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15113 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15114 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15115 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15116
15117 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15118 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15119 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15120 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15121 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15122 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15123 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15124 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15125 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15126 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15127 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15128 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15129
15130 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15131 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15132 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15133 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15134 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15135 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15136 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15137 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15138 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15139 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15140 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15141 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15142
15143 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15144 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15145 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15146 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15147 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15148 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15149 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15150 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15151 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15152 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15153 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15154 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15155
15156 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15157 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15158 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15159 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15160 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15161 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15162 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15163 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15164 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15165 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15166 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15167 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15168
15169 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15170 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15171 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15172 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15173 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15174 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15175 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15176 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15177 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15178 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15179 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15180 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15181
15182 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15183 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15184 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15185 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15186 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15187 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15188 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15189 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15190 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15191 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15192 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15193 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
15194
15195 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15196 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15197 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15198 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15199 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15200 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15201 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15202 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15203 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15204 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15205 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15206 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15207
15208 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15209 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15210 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15211 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15212 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15213 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15214 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15215 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15216 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15217 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15218 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15219 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15220
15221 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15222 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15223 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15224 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15225 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15226 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15227 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15228 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15229 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15230 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15231 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15232 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15233
15234 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15235 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15236 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15237 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15238 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15239 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15240 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15241 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15242 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15243 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15244 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15245 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15246
15247 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15248 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15249 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15250 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15251 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15252 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15253 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15254 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15255 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15256 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15257 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15258 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15259
15260 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15261 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15262 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15263 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15264 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15265 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15266 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15267 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15268 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15269 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15270 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15271 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15272
15273 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15274 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15275 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15276 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15277 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15278 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15279 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15280 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15281 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15282 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15283 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15284 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15285
15286 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15287 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15288 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15289 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15290 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15291 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15292 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15293 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15294 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15295 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15296 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15297 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15298
15299 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15300 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15301 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15302 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15303 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15304 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15305 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15306 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15307 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15308 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15309 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15310 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15311
15312 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15313 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15314 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15315 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15316 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15317 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15318 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15319 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15320 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15321 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15322 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15323 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15324
15325 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15326 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15327 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15328 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15329 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15330 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15331 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15332 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15333 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15334 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15335 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15336 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15337
15338 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15339 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15340 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15341 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15342 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15343 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15344 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15345 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15346 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15347 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15348 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15349 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15350
15351 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15352 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15353 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15354 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15355 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15356 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15357 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15358 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15359 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15360 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15361 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15362 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15363
15364 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15365 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15366 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15367 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15368 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15369 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15370 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15371 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15372 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15373 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15374 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15375 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15376
15377 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15378 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15379 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15380 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15381 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15382 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15383 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15384 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15385 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15386 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15387 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15388 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15389
15390 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15391 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15392 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15393 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15394 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15395 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15396 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15397 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15398 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15399 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15400 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15401 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15402
15403 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15404 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15405 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15406 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15407 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15408 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15409 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15410 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15411 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15412 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15413 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15414 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15415
15416 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15417 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15418 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15419 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15420 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15421 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15422 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15423 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15424 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15425 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15426 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15427 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15428
15429 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15430 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15431 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15432 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15433 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15434 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15435 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15436 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15437 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15438 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15439 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15440 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15441
15442 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15443 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15444 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15445 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15446 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15447 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15448 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15449 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15450 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15451 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15452 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15453 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15454
15455 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15456 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15457 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15458 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15459 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15460 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15461 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15462 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15463 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15464 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15465 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15466 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15467
15468 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15469 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15470 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15471 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15472
15473 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15474 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15475 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15476 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15477 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15478 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15479 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15480 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15481 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15482 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15483 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15484 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15485
15486 /* The implementation of the FIX instruction is broken on some
15487 assemblers, in that it accepts a precision specifier as well as a
15488 rounding specifier, despite the fact that this is meaningless.
15489 To be more compatible, we accept it as well, though of course it
15490 does not set any bits. */
15491 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15492 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15493 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15494 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15495 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15496 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15497 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15498 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15499 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15500 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15501 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15502 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15503 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15504
15505 /* Instructions that were new with the real FPA, call them V2. */
15506 #undef ARM_VARIANT
15507 #define ARM_VARIANT &fpu_fpa_ext_v2
15508 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15509 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15510 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15511 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15512 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15513 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15514
15515 #undef ARM_VARIANT
15516 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15517 /* Moves and type conversions. */
15518 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15519 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15520 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15521 cCE(fmstat, ef1fa10, 0, (), noargs),
15522 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15523 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15524 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15525 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15526 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15527 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15528 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15529 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15530
15531 /* Memory operations. */
15532 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15533 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15534 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15535 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15536 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15537 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15538 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15539 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15540 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15541 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15542 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15543 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15544 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15545 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15546 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15547 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15548 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15549 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15550
15551 /* Monadic operations. */
15552 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15553 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15554 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15555
15556 /* Dyadic operations. */
15557 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15558 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15559 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15560 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15561 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15562 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15563 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15564 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15565 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15566
15567 /* Comparisons. */
15568 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15569 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15570 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15571 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15572
15573 #undef ARM_VARIANT
15574 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15575 /* Moves and type conversions. */
15576 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15577 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15578 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15579 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15580 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15581 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15582 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15583 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15584 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15585 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15586 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15587 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15588 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15589
15590 /* Memory operations. */
15591 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15592 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15593 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15594 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15595 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15596 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15597 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15598 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15599 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15600 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15601
15602 /* Monadic operations. */
15603 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15604 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15605 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15606
15607 /* Dyadic operations. */
15608 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15609 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15610 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15611 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15612 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15613 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15614 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15615 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15616 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15617
15618 /* Comparisons. */
15619 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15620 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15621 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15622 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15623
15624 #undef ARM_VARIANT
15625 #define ARM_VARIANT &fpu_vfp_ext_v2
15626 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15627 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15628 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15629 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15630
15631 /* Instructions which may belong to either the Neon or VFP instruction sets.
15632 Individual encoder functions perform additional architecture checks. */
15633 #undef ARM_VARIANT
15634 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15635 #undef THUMB_VARIANT
15636 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15637 /* These mnemonics are unique to VFP. */
15638 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15639 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15640 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15641 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15642 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15643 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15644 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15645 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15646 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15647 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15648
15649 /* Mnemonics shared by Neon and VFP. */
15650 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15651 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15652 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15653
15654 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15655 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15656
15657 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15658 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15659
15660 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15661 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15662 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15663 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15664 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15665 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15666 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15667 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15668
15669 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15670
15671 /* NOTE: All VMOV encoding is special-cased! */
15672 NCE(vmov, 0, 1, (VMOV), neon_mov),
15673 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15674
15675 #undef THUMB_VARIANT
15676 #define THUMB_VARIANT &fpu_neon_ext_v1
15677 #undef ARM_VARIANT
15678 #define ARM_VARIANT &fpu_neon_ext_v1
15679 /* Data processing with three registers of the same length. */
15680 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15681 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15682 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15683 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15684 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15685 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15686 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15687 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15688 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15689 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15690 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15691 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15692 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15693 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15694 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15695 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15696 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15697 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15698 /* If not immediate, fall back to neon_dyadic_i64_su.
15699 shl_imm should accept I8 I16 I32 I64,
15700 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15701 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15702 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15703 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15704 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15705 /* Logic ops, types optional & ignored. */
15706 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15707 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15708 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15709 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15710 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15711 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15712 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15713 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15714 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15715 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15716 /* Bitfield ops, untyped. */
15717 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15718 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15719 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15720 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15721 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15722 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15723 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15724 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15725 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15726 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15727 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15728 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15729 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15730 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15731 back to neon_dyadic_if_su. */
15732 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15733 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15734 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15735 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15736 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15737 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15738 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15739 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15740 /* Comparison. Type I8 I16 I32 F32. */
15741 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15742 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15743 /* As above, D registers only. */
15744 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15745 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15746 /* Int and float variants, signedness unimportant. */
15747 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15748 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15749 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15750 /* Add/sub take types I8 I16 I32 I64 F32. */
15751 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15752 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15753 /* vtst takes sizes 8, 16, 32. */
15754 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15755 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15756 /* VMUL takes I8 I16 I32 F32 P8. */
15757 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15758 /* VQD{R}MULH takes S16 S32. */
15759 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15760 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15761 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15762 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15763 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15764 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15765 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15766 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15767 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15768 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15769 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15770 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15771 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15772 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15773 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15774 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15775
15776 /* Two address, int/float. Types S8 S16 S32 F32. */
15777 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15778 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15779
15780 /* Data processing with two registers and a shift amount. */
15781 /* Right shifts, and variants with rounding.
15782 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15783 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15784 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15785 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15786 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15787 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15788 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15789 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15790 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15791 /* Shift and insert. Sizes accepted 8 16 32 64. */
15792 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15793 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15794 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15795 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15796 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15797 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15798 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15799 /* Right shift immediate, saturating & narrowing, with rounding variants.
15800 Types accepted S16 S32 S64 U16 U32 U64. */
15801 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15802 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15803 /* As above, unsigned. Types accepted S16 S32 S64. */
15804 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15805 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15806 /* Right shift narrowing. Types accepted I16 I32 I64. */
15807 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15808 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15809 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15810 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15811 /* CVT with optional immediate for fixed-point variant. */
15812 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15813
15814 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15815 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15816
15817 /* Data processing, three registers of different lengths. */
15818 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15819 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15820 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15821 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15822 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15823 /* If not scalar, fall back to neon_dyadic_long.
15824 Vector types as above, scalar types S16 S32 U16 U32. */
15825 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15826 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15827 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15828 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15829 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15830 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15831 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15832 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15833 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15834 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15835 /* Saturating doubling multiplies. Types S16 S32. */
15836 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15837 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15838 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15839 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15840 S16 S32 U16 U32. */
15841 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15842
15843 /* Extract. Size 8. */
15844 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15845 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15846
15847 /* Two registers, miscellaneous. */
15848 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15849 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15850 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15851 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15852 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15853 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15854 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15855 /* Vector replicate. Sizes 8 16 32. */
15856 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15857 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15858 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15859 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15860 /* VMOVN. Types I16 I32 I64. */
15861 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15862 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15863 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15864 /* VQMOVUN. Types S16 S32 S64. */
15865 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15866 /* VZIP / VUZP. Sizes 8 16 32. */
15867 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15868 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15869 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15870 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15871 /* VQABS / VQNEG. Types S8 S16 S32. */
15872 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15873 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15874 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15875 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15876 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15877 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15878 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15879 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15880 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15881 /* Reciprocal estimates. Types U32 F32. */
15882 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15883 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15884 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15885 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15886 /* VCLS. Types S8 S16 S32. */
15887 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15888 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15889 /* VCLZ. Types I8 I16 I32. */
15890 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15891 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15892 /* VCNT. Size 8. */
15893 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15894 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15895 /* Two address, untyped. */
15896 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15897 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15898 /* VTRN. Sizes 8 16 32. */
15899 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15900 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15901
15902 /* Table lookup. Size 8. */
15903 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15904 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15905
15906 #undef THUMB_VARIANT
15907 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15908 #undef ARM_VARIANT
15909 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15910 /* Neon element/structure load/store. */
15911 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15912 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15913 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15914 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15915 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15916 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15917 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15918 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15919
15920 #undef THUMB_VARIANT
15921 #define THUMB_VARIANT &fpu_vfp_ext_v3
15922 #undef ARM_VARIANT
15923 #define ARM_VARIANT &fpu_vfp_ext_v3
15924 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15925 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15926 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15927 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15928 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15929 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15930 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15931 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15932 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15933 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15934 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15935 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15936 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15937 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15938 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15939 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15940 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15941 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15942
15943 #undef THUMB_VARIANT
15944 #undef ARM_VARIANT
15945 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15946 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15947 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15948 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15949 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15950 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15951 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15952 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
15953 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
15954
15955 #undef ARM_VARIANT
15956 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15957 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
15958 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
15959 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
15960 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
15961 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
15962 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
15963 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
15964 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
15965 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
15966 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15967 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15968 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15969 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15970 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15971 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15972 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15973 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15974 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15975 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
15976 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
15977 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15978 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15979 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15980 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15981 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15982 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15983 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
15984 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
15985 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
15986 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
15987 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
15988 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
15989 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
15990 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
15991 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
15992 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
15993 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
15994 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15995 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15996 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15997 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15998 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15999 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16000 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16001 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16002 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16003 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16004 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16005 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16006 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16007 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16008 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16009 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16010 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16011 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16012 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16013 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16014 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16015 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16016 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16017 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16018 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16019 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16020 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16021 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16022 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16023 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16024 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16025 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16026 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16027 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16028 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16029 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16030 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16031 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16032 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16033 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16034 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16035 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16036 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16037 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16038 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16039 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16040 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16041 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16042 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16043 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16044 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16045 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16046 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16047 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16048 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16049 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16050 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16051 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16052 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16053 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16054 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16055 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16056 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16057 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16058 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16059 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16060 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16061 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16062 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16063 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16064 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16065 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16066 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16067 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16068 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16069 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16070 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16071 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16072 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16073 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16074 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16075 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16076 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16077 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16078 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16079 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16080 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16081 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16082 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16083 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16084 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16085 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16086 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16087 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16088 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16089 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16090 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16091 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16092 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16093 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16094 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16095 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16096 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16097 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16098 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16099 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16100 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16101 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16102 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16103 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16104 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16105 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16106 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16107 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16108 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16109 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16110 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16111 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16112 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16113 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16114 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16115 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16116 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16117 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16118 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16119
16120 #undef ARM_VARIANT
16121 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16122 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16123 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16124 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16125 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16126 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16127 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16128 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16129 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16130 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16131 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16132 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16133 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16134 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16135 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16136 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16137 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16138 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16139 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16140 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16141 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16142 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16143 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16144 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16145 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16146 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16147 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16148 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16149 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16150 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16151 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16152 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16153 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16154 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16155 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16156 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16157 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16158 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16159 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16160 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16161 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16162 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16163 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16164 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16165 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16166 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16167 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16168 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16169 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16170 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16171 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16172 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16173 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16174 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16175 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16176 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16177 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16178 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16179
16180 #undef ARM_VARIANT
16181 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16182 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16183 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16184 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16185 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16186 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16187 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16188 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16189 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16190 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16191 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16192 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16193 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16194 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16195 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16196 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16197 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16198 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16199 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16200 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16201 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16202 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16203 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16204 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16205 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16206 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16207 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16208 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16209 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16210 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16211 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16212 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16213 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16214 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16215 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16216 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16217 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16218 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16219 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16220 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16221 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16222 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16223 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16224 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16225 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16226 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16227 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16228 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16229 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16230 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16231 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16232 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16233 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16234 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16235 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16236 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16237 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16238 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16239 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16240 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16241 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16242 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16243 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16244 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16245 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16246 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16247 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16248 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16249 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16250 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16251 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16252 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16253 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16254 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16255 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16256 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16257 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16258 };
16259 #undef ARM_VARIANT
16260 #undef THUMB_VARIANT
16261 #undef TCE
16262 #undef TCM
16263 #undef TUE
16264 #undef TUF
16265 #undef TCC
16266 #undef cCE
16267 #undef cCL
16268 #undef C3E
16269 #undef CE
16270 #undef CM
16271 #undef UE
16272 #undef UF
16273 #undef UT
16274 #undef NUF
16275 #undef nUF
16276 #undef NCE
16277 #undef nCE
16278 #undef OPS0
16279 #undef OPS1
16280 #undef OPS2
16281 #undef OPS3
16282 #undef OPS4
16283 #undef OPS5
16284 #undef OPS6
16285 #undef do_0
16286 \f
16287 /* MD interface: bits in the object file. */
16288
16289 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16290 for use in the a.out file, and stores them in the array pointed to by buf.
16291 This knows about the endian-ness of the target machine and does
16292 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
16293 2 (short) and 4 (long) Floating numbers are put out as a series of
16294 LITTLENUMS (shorts, here at least). */
16295
16296 void
16297 md_number_to_chars (char * buf, valueT val, int n)
16298 {
16299 if (target_big_endian)
16300 number_to_chars_bigendian (buf, val, n);
16301 else
16302 number_to_chars_littleendian (buf, val, n);
16303 }
16304
16305 static valueT
16306 md_chars_to_number (char * buf, int n)
16307 {
16308 valueT result = 0;
16309 unsigned char * where = (unsigned char *) buf;
16310
16311 if (target_big_endian)
16312 {
16313 while (n--)
16314 {
16315 result <<= 8;
16316 result |= (*where++ & 255);
16317 }
16318 }
16319 else
16320 {
16321 while (n--)
16322 {
16323 result <<= 8;
16324 result |= (where[n] & 255);
16325 }
16326 }
16327
16328 return result;
16329 }
16330
16331 /* MD interface: Sections. */
16332
16333 /* Estimate the size of a frag before relaxing. Assume everything fits in
16334 2 bytes. */
16335
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Optimistically assume the narrow (2-byte) encoding; arm_relax_frag
     widens the frag to 4 bytes later where the operand does not fit.  */
  fragp->fr_var = 2;
  return 2;
}
16343
16344 /* Convert a machine dependent frag. */
16345
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being finalized sits at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the narrow (16-bit) opcode emitted earlier; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol) {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
  } else {
      exp.X_op = O_constant;
  }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    /* Loads and stores with an immediate offset.  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* NOTE(review): the >>12 test appears to select the forms that
	     keep their register in bits 8-10 (copied to bits 12-14);
	     other forms carry Rd in bits 0-2 and Rn in bits 3-5 — confirm
	     against the Thumb encoding tables.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the second PC-relative load form is resolved PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    /* Data-processing with an immediate operand.  */
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the destination register at bit 8 of the wide
	     encoding; cmp/cmn place it 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    /* Unconditional branch.  */
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    /* Conditional branch; the condition field moves from bits 8-11 of
       the narrow form to bits 22-25 of the wide form.  */
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    /* add/sub with SP or PC as the base register.  */
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    /* Two-register add/sub immediate.  */
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variants, which take
	     a different relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort();
    }
  /* Attach a fixup for the operand, recording where it came from so
     diagnostics point at the source line.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
16509
16510 /* Return the size of a relaxable immediate operand instruction.
16511 SHIFT and SIZE specify the form of the allowable immediate. */
16512 static int
16513 relax_immediate (fragS *fragp, int size, int shift)
16514 {
16515 offsetT offset;
16516 offsetT mask;
16517 offsetT low;
16518
16519 /* ??? Should be able to do better than this. */
16520 if (fragp->fr_symbol)
16521 return 4;
16522
16523 low = (1 << shift) - 1;
16524 mask = (1 << (shift + size)) - (1 << shift);
16525 offset = fragp->fr_offset;
16526 /* Force misaligned offsets to 32-bit variant. */
16527 if (offset & low)
16528 return 4;
16529 if (offset & ~mask)
16530 return 4;
16531 return 2;
16532 }
16533
16534 /* Get the address of a symbol during relaxation. */
16535 static addressT
16536 relaxed_symbol_addr(fragS *fragp, long stretch)
16537 {
16538 fragS *sym_frag;
16539 addressT addr;
16540 symbolS *sym;
16541
16542 sym = fragp->fr_symbol;
16543 sym_frag = symbol_get_frag (sym);
16544 know (S_GET_SEGMENT (sym) != absolute_section
16545 || sym_frag == &zero_address_frag);
16546 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16547
16548 /* If frag has yet to be reached on this pass, assume it will
16549 move by STRETCH just as we did. If this is not so, it will
16550 be because some frag between grows, and that will force
16551 another pass. */
16552
16553 if (stretch != 0
16554 && sym_frag->relax_marker != fragp->relax_marker)
16555 addr += stretch;
16556
16557 return addr;
16558 }
16559
16560 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16561 load. */
16562 static int
16563 relax_adr (fragS *fragp, asection *sec, long stretch)
16564 {
16565 addressT addr;
16566 offsetT val;
16567
16568 /* Assume worst case for symbols not known to be in the same section. */
16569 if (!S_IS_DEFINED(fragp->fr_symbol)
16570 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16571 return 4;
16572
16573 val = relaxed_symbol_addr(fragp, stretch);
16574 addr = fragp->fr_address + fragp->fr_fix;
16575 addr = (addr + 4) & ~3;
16576 /* Force misaligned targets to 32-bit variant. */
16577 if (val & 3)
16578 return 4;
16579 val -= addr;
16580 if (val < 0 || val > 1020)
16581 return 4;
16582 return 2;
16583 }
16584
16585 /* Return the size of a relaxable add/sub immediate instruction. */
16586 static int
16587 relax_addsub (fragS *fragp, asection *sec)
16588 {
16589 char *buf;
16590 int op;
16591
16592 buf = fragp->fr_literal + fragp->fr_fix;
16593 op = bfd_get_16(sec->owner, buf);
16594 if ((op & 0xf) == ((op >> 4) & 0xf))
16595 return relax_immediate (fragp, 8, 0);
16596 else
16597 return relax_immediate (fragp, 3, 0);
16598 }
16599
16600
16601 /* Return the size of a relaxable branch instruction. BITS is the
16602 size of the offset field in the narrow instruction. */
16603
16604 static int
16605 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16606 {
16607 addressT addr;
16608 offsetT val;
16609 offsetT limit;
16610
16611 /* Assume worst case for symbols not known to be in the same section. */
16612 if (!S_IS_DEFINED(fragp->fr_symbol)
16613 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16614 return 4;
16615
16616 val = relaxed_symbol_addr(fragp, stretch);
16617 addr = fragp->fr_address + fragp->fr_fix + 4;
16618 val -= addr;
16619
16620 /* Offset is a signed value *2 */
16621 limit = 1 << bits;
16622 if (val >= limit || val < -limit)
16623 return 4;
16624 return 2;
16625 }
16626
16627
16628 /* Relax a machine dependent frag. This returns the amount by which
16629 the current size of the frag should change. */
16630
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype to the matching
     reachability/range check.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr(fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate(fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate(fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate(fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate(fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr(fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate(fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch(fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch(fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane(fragp);
    }

  return newsize - oldsize;
}
16706
16707 /* Round up a section size to the appropriate boundary. */
16708
16709 valueT
16710 md_section_align (segT segment ATTRIBUTE_UNUSED,
16711 valueT size)
16712 {
16713 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16714 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16715 {
16716 /* For a.out, force the section size to be aligned. If we don't do
16717 this, BFD will align it for us, but it will not write out the
16718 final bytes of the section. This may be a bug in BFD, but it is
16719 easier to fix it here since that is how the other a.out targets
16720 work. */
16721 int align;
16722
16723 align = bfd_get_section_alignment (stdoutput, segment);
16724 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16725 }
16726 #endif
16727
16728 return size;
16729 }
16730
16731 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16732 of an rs_align_code fragment. */
16733
16734 void
16735 arm_handle_align (fragS * fragP)
16736 {
16737 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16738 static char const thumb_noop[2] = { 0xc0, 0x46 };
16739 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16740 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16741
16742 int bytes, fix, noop_size;
16743 char * p;
16744 const char * noop;
16745
16746 if (fragP->fr_type != rs_align_code)
16747 return;
16748
16749 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16750 p = fragP->fr_literal + fragP->fr_fix;
16751 fix = 0;
16752
16753 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16754 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16755
16756 if (fragP->tc_frag_data)
16757 {
16758 if (target_big_endian)
16759 noop = thumb_bigend_noop;
16760 else
16761 noop = thumb_noop;
16762 noop_size = sizeof (thumb_noop);
16763 }
16764 else
16765 {
16766 if (target_big_endian)
16767 noop = arm_bigend_noop;
16768 else
16769 noop = arm_noop;
16770 noop_size = sizeof (arm_noop);
16771 }
16772
16773 if (bytes & (noop_size - 1))
16774 {
16775 fix = bytes & (noop_size - 1);
16776 memset (p, 0, fix);
16777 p += fix;
16778 bytes -= fix;
16779 }
16780
16781 while (bytes >= noop_size)
16782 {
16783 memcpy (p, noop, noop_size);
16784 p += noop_size;
16785 bytes -= noop_size;
16786 fix += noop_size;
16787 }
16788
16789 fragP->fr_fix += fix;
16790 fragP->fr_var = noop_size;
16791 }
16792
16793 /* Called from md_do_align. Used to create an alignment
16794 frag in a code section. */
16795
16796 void
16797 arm_frag_align_code (int n, int max)
16798 {
16799 char * p;
16800
16801 /* We assume that there will never be a requirement
16802 to support alignments greater than 32 bytes. */
16803 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16804 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16805
16806 p = frag_var (rs_align_code,
16807 MAX_MEM_FOR_RS_ALIGN_CODE,
16808 1,
16809 (relax_substateT) max,
16810 (symbolS *) NULL,
16811 (offsetT) n,
16812 (char *) NULL);
16813 *p = 0;
16814 }
16815
16816 /* Perform target specific initialisation of a frag. */
16817
void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  A nonzero
     value means Thumb; arm_handle_align reads this to pick the right
     no-op padding pattern.  */
  fragP->tc_frag_data = thumb_mode;
}
16824
16825 #ifdef OBJ_ELF
16826 /* When we change sections we need to issue a new mapping symbol. */
16827
void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  /* Pick up the state previously recorded for this section, so the
     mapping-symbol and PR-dependency tracking continue from where the
     section left off.  */
  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}
16852
16853 int
16854 arm_elf_section_type (const char * str, size_t len)
16855 {
16856 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16857 return SHT_ARM_EXIDX;
16858
16859 return -1;
16860 }
16861 \f
16862 /* Code to deal with unwinding tables. */
16863
16864 static void add_unwind_adjustsp (offsetT);
16865
/* Generate any deferred unwind frame offset.  */
16867
16868 static void
16869 flush_pending_unwind (void)
16870 {
16871 offsetT offset;
16872
16873 offset = unwind.pending_offset;
16874 unwind.pending_offset = 0;
16875 if (offset != 0)
16876 add_unwind_adjustsp (offset);
16877 }
16878
16879 /* Add an opcode to this list for this function. Two-byte opcodes should
16880 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16881 order. */
16882
16883 static void
16884 add_unwind_opcode (valueT op, int length)
16885 {
16886 /* Add any deferred stack adjustment. */
16887 if (unwind.pending_offset)
16888 flush_pending_unwind ();
16889
16890 unwind.sp_restored = 0;
16891
16892 if (unwind.opcode_count + length > unwind.opcode_alloc)
16893 {
16894 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16895 if (unwind.opcodes)
16896 unwind.opcodes = xrealloc (unwind.opcodes,
16897 unwind.opcode_alloc);
16898 else
16899 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16900 }
16901 while (length > 0)
16902 {
16903 length--;
16904 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16905 op >>= 8;
16906 unwind.opcode_count++;
16907 }
16908 }
16909
16910 /* Add unwind opcodes to adjust the stack pointer. */
16911
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn; the uleb128 bytes go in first (they come out
	 last, since the list is reversed), then the 0xb2 opcode.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      /* Emit one 0x7f opcode for each full 0x100 bytes of downward
	 adjustment, then one final opcode for the remainder.  */
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
16971
16972 /* Finish the list of unwind opcodes for this function. */
16973 static void
16974 finish_unwind_opcodes (void)
16975 {
16976 valueT op;
16977
16978 if (unwind.fp_used)
16979 {
16980 /* Adjust sp as necessary. */
16981 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
16982 flush_pending_unwind ();
16983
16984 /* After restoring sp from the frame pointer. */
16985 op = 0x90 | unwind.fp_reg;
16986 add_unwind_opcode (op, 1);
16987 }
16988 else
16989 flush_pending_unwind ();
16990 }
16991
16992
16993 /* Start an exception table entry. If idx is nonzero this is an index table
16994 entry. */
16995
16996 static void
16997 start_unwind_section (const segT text_seg, int idx)
16998 {
16999 const char * text_name;
17000 const char * prefix;
17001 const char * prefix_once;
17002 const char * group_name;
17003 size_t prefix_len;
17004 size_t text_len;
17005 char * sec_name;
17006 size_t sec_name_len;
17007 int type;
17008 int flags;
17009 int linkonce;
17010
17011 if (idx)
17012 {
17013 prefix = ELF_STRING_ARM_unwind;
17014 prefix_once = ELF_STRING_ARM_unwind_once;
17015 type = SHT_ARM_EXIDX;
17016 }
17017 else
17018 {
17019 prefix = ELF_STRING_ARM_unwind_info;
17020 prefix_once = ELF_STRING_ARM_unwind_info_once;
17021 type = SHT_PROGBITS;
17022 }
17023
17024 text_name = segment_name (text_seg);
17025 if (streq (text_name, ".text"))
17026 text_name = "";
17027
17028 if (strncmp (text_name, ".gnu.linkonce.t.",
17029 strlen (".gnu.linkonce.t.")) == 0)
17030 {
17031 prefix = prefix_once;
17032 text_name += strlen (".gnu.linkonce.t.");
17033 }
17034
17035 prefix_len = strlen (prefix);
17036 text_len = strlen (text_name);
17037 sec_name_len = prefix_len + text_len;
17038 sec_name = xmalloc (sec_name_len + 1);
17039 memcpy (sec_name, prefix, prefix_len);
17040 memcpy (sec_name + prefix_len, text_name, text_len);
17041 sec_name[prefix_len + text_len] = '\0';
17042
17043 flags = SHF_ALLOC;
17044 linkonce = 0;
17045 group_name = 0;
17046
17047 /* Handle COMDAT group. */
17048 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17049 {
17050 group_name = elf_group_name (text_seg);
17051 if (group_name == NULL)
17052 {
17053 as_bad ("Group section `%s' has no group signature",
17054 segment_name (text_seg));
17055 ignore_rest_of_line ();
17056 return;
17057 }
17058 flags |= SHF_GROUP;
17059 linkonce = 1;
17060 }
17061
17062 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17063
17064 /* Set the setion link for index tables. */
17065 if (idx)
17066 elf_linked_to_section (now_seg) = text_seg;
17067 }
17068
17069
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
17073
17074 static valueT
17075 create_unwind_entry (int have_data)
17076 {
17077 int size;
17078 addressT where;
17079 char *ptr;
17080 /* The current word of data. */
17081 valueT data;
17082 /* The number of bytes left in this word. */
17083 int n;
17084
17085 finish_unwind_opcodes ();
17086
17087 /* Remember the current text section. */
17088 unwind.saved_seg = now_seg;
17089 unwind.saved_subseg = now_subseg;
17090
17091 start_unwind_section (now_seg, 0);
17092
17093 if (unwind.personality_routine == NULL)
17094 {
17095 if (unwind.personality_index == -2)
17096 {
17097 if (have_data)
17098 as_bad (_("handerdata in cantunwind frame"));
17099 return 1; /* EXIDX_CANTUNWIND. */
17100 }
17101
17102 /* Use a default personality routine if none is specified. */
17103 if (unwind.personality_index == -1)
17104 {
17105 if (unwind.opcode_count > 3)
17106 unwind.personality_index = 1;
17107 else
17108 unwind.personality_index = 0;
17109 }
17110
17111 /* Space for the personality routine entry. */
17112 if (unwind.personality_index == 0)
17113 {
17114 if (unwind.opcode_count > 3)
17115 as_bad (_("too many unwind opcodes for personality routine 0"));
17116
17117 if (!have_data)
17118 {
17119 /* All the data is inline in the index table. */
17120 data = 0x80;
17121 n = 3;
17122 while (unwind.opcode_count > 0)
17123 {
17124 unwind.opcode_count--;
17125 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17126 n--;
17127 }
17128
17129 /* Pad with "finish" opcodes. */
17130 while (n--)
17131 data = (data << 8) | 0xb0;
17132
17133 return data;
17134 }
17135 size = 0;
17136 }
17137 else
17138 /* We get two opcodes "free" in the first word. */
17139 size = unwind.opcode_count - 2;
17140 }
17141 else
17142 /* An extra byte is required for the opcode count. */
17143 size = unwind.opcode_count + 1;
17144
17145 size = (size + 3) >> 2;
17146 if (size > 0xff)
17147 as_bad (_("too many unwind opcodes"));
17148
17149 frag_align (2, 0, 0);
17150 record_alignment (now_seg, 2);
17151 unwind.table_entry = expr_build_dot ();
17152
17153 /* Allocate the table entry. */
17154 ptr = frag_more ((size << 2) + 4);
17155 where = frag_now_fix () - ((size << 2) + 4);
17156
17157 switch (unwind.personality_index)
17158 {
17159 case -1:
17160 /* ??? Should this be a PLT generating relocation? */
17161 /* Custom personality routine. */
17162 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17163 BFD_RELOC_ARM_PREL31);
17164
17165 where += 4;
17166 ptr += 4;
17167
17168 /* Set the first byte to the number of additional words. */
17169 data = size - 1;
17170 n = 3;
17171 break;
17172
17173 /* ABI defined personality routines. */
17174 case 0:
17175 /* Three opcodes bytes are packed into the first word. */
17176 data = 0x80;
17177 n = 3;
17178 break;
17179
17180 case 1:
17181 case 2:
17182 /* The size and first two opcode bytes go in the first word. */
17183 data = ((0x80 + unwind.personality_index) << 8) | size;
17184 n = 2;
17185 break;
17186
17187 default:
17188 /* Should never happen. */
17189 abort ();
17190 }
17191
17192 /* Pack the opcodes into words (MSB first), reversing the list at the same
17193 time. */
17194 while (unwind.opcode_count > 0)
17195 {
17196 if (n == 0)
17197 {
17198 md_number_to_chars (ptr, data, 4);
17199 ptr += 4;
17200 n = 4;
17201 data = 0;
17202 }
17203 unwind.opcode_count--;
17204 n--;
17205 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17206 }
17207
17208 /* Finish off the last word. */
17209 if (n < 4)
17210 {
17211 /* Pad with "finish" opcodes. */
17212 while (n--)
17213 data = (data << 8) | 0xb0;
17214
17215 md_number_to_chars (ptr, data, 4);
17216 }
17217
17218 if (!have_data)
17219 {
17220 /* Add an empty descriptor if there is no user-specified data. */
17221 ptr = frag_more (4);
17222 md_number_to_chars (ptr, 0, 4);
17223 }
17224
17225 return 0;
17226 }
17227
17228
17229 /* Initialize the DWARF-2 unwind information for this procedure. */
17230
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry, define the CFA as the stack pointer with a
     zero offset; prologue CFI directives adjust it from there.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
17236 #endif /* OBJ_ELF */
17237
17238 /* Convert REGNAME to a DWARF-2 register number. */
17239
17240 int
17241 tc_arm_regname_to_dw2regnum (char *regname)
17242 {
17243 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17244
17245 if (reg == FAIL)
17246 return -1;
17247
17248 return reg;
17249 }
17250
17251 #ifdef TE_PE
17252 void
17253 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17254 {
17255 expressionS expr;
17256
17257 expr.X_op = O_secrel;
17258 expr.X_add_symbol = symbol;
17259 expr.X_add_number = 0;
17260 emit_expr (&expr, size);
17261 }
17262 #endif
17263
17264 /* MD interface: Symbol and relocation handling. */
17265
17266 /* Return the address within the segment that a PC-relative fixup is
17267 relative to. For ARM, PC-relative fixups applied to instructions
17268 are generally relative to the location of the fixup plus 8 bytes.
17269 Thumb branches are offset by 4, and Thumb loads relative to PC
17270 require special handling. */
17271
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself: its offset within the frag plus the
     frag's address.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

      /* Thumb loads relative to PC: apply the +4 pipeline offset,
	 then clear the bottom two bits of the resulting PC value.  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
17357
17358 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17359 Otherwise we have no need to default values of symbols. */
17360
17361 symbolS *
17362 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17363 {
17364 #ifdef OBJ_ELF
17365 if (name[0] == '_' && name[1] == 'G'
17366 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17367 {
17368 if (!GOT_symbol)
17369 {
17370 if (symbol_find (name))
17371 as_bad ("GOT already in the symbol table");
17372
17373 GOT_symbol = symbol_new (name, undefined_section,
17374 (valueT) 0, & zero_address_frag);
17375 }
17376
17377 return GOT_symbol;
17378 }
17379 #endif
17380
17381 return 0;
17382 }
17383
17384 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17385 computed as two separate immediate values, added together. We
17386 already know that this value cannot be computed by just one ARM
17387 instruction. */
17388
17389 static unsigned int
17390 validate_immediate_twopart (unsigned int val,
17391 unsigned int * highpart)
17392 {
17393 unsigned int a;
17394 unsigned int i;
17395
17396 for (i = 0; i < 32; i += 2)
17397 if (((a = rotate_left (val, i)) & 0xff) != 0)
17398 {
17399 if (a & 0xff00)
17400 {
17401 if (a & ~ 0xffff)
17402 continue;
17403 * highpart = (a >> 8) | ((i + 24) << 7);
17404 }
17405 else if (a & 0xff0000)
17406 {
17407 if (a & 0xff000000)
17408 continue;
17409 * highpart = (a >> 16) | ((i + 16) << 7);
17410 }
17411 else
17412 {
17413 assert (a & 0xff000000);
17414 * highpart = (a >> 24) | ((i + 8) << 7);
17415 }
17416
17417 return (a & 0xff) | (i << 7);
17418 }
17419
17420 return FAIL;
17421 }
17422
17423 static int
17424 validate_offset_imm (unsigned int val, int hwse)
17425 {
17426 if ((hwse && val > 255) || val > 4095)
17427 return FAIL;
17428 return val;
17429 }
17430
17431 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17432 negative immediate constant by altering the instruction. A bit of
17433 a hack really.
17434 MOV <-> MVN
17435 AND <-> BIC
17436 ADC <-> SBC
17437 by inverting the second operand, and
17438 ADD <-> SUB
17439 CMP <-> CMN
17440 by negating the second operand. */
17441
17442 static int
17443 negate_data_op (unsigned long * instruction,
17444 unsigned long value)
17445 {
17446 int op, new_inst;
17447 unsigned long negated, inverted;
17448
17449 negated = encode_arm_immediate (-value);
17450 inverted = encode_arm_immediate (~value);
17451
17452 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17453 switch (op)
17454 {
17455 /* First negates. */
17456 case OPCODE_SUB: /* ADD <-> SUB */
17457 new_inst = OPCODE_ADD;
17458 value = negated;
17459 break;
17460
17461 case OPCODE_ADD:
17462 new_inst = OPCODE_SUB;
17463 value = negated;
17464 break;
17465
17466 case OPCODE_CMP: /* CMP <-> CMN */
17467 new_inst = OPCODE_CMN;
17468 value = negated;
17469 break;
17470
17471 case OPCODE_CMN:
17472 new_inst = OPCODE_CMP;
17473 value = negated;
17474 break;
17475
17476 /* Now Inverted ops. */
17477 case OPCODE_MOV: /* MOV <-> MVN */
17478 new_inst = OPCODE_MVN;
17479 value = inverted;
17480 break;
17481
17482 case OPCODE_MVN:
17483 new_inst = OPCODE_MOV;
17484 value = inverted;
17485 break;
17486
17487 case OPCODE_AND: /* AND <-> BIC */
17488 new_inst = OPCODE_BIC;
17489 value = inverted;
17490 break;
17491
17492 case OPCODE_BIC:
17493 new_inst = OPCODE_AND;
17494 value = inverted;
17495 break;
17496
17497 case OPCODE_ADC: /* ADC <-> SBC */
17498 new_inst = OPCODE_SBC;
17499 value = inverted;
17500 break;
17501
17502 case OPCODE_SBC:
17503 new_inst = OPCODE_ADC;
17504 value = inverted;
17505 break;
17506
17507 /* We cannot do anything. */
17508 default:
17509 return FAIL;
17510 }
17511
17512 if (value == (unsigned) FAIL)
17513 return FAIL;
17514
17515 *instruction &= OPCODE_MASK;
17516 *instruction |= new_inst << DATA_OP_SHIFT;
17517 return value;
17518 }
17519
17520 /* Like negate_data_op, but for Thumb-2. */
17521
17522 static unsigned int
17523 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17524 {
17525 int op, new_inst;
17526 int rd;
17527 unsigned int negated, inverted;
17528
17529 negated = encode_thumb32_immediate (-value);
17530 inverted = encode_thumb32_immediate (~value);
17531
17532 rd = (*instruction >> 8) & 0xf;
17533 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17534 switch (op)
17535 {
17536 /* ADD <-> SUB. Includes CMP <-> CMN. */
17537 case T2_OPCODE_SUB:
17538 new_inst = T2_OPCODE_ADD;
17539 value = negated;
17540 break;
17541
17542 case T2_OPCODE_ADD:
17543 new_inst = T2_OPCODE_SUB;
17544 value = negated;
17545 break;
17546
17547 /* ORR <-> ORN. Includes MOV <-> MVN. */
17548 case T2_OPCODE_ORR:
17549 new_inst = T2_OPCODE_ORN;
17550 value = inverted;
17551 break;
17552
17553 case T2_OPCODE_ORN:
17554 new_inst = T2_OPCODE_ORR;
17555 value = inverted;
17556 break;
17557
17558 /* AND <-> BIC. TST has no inverted equivalent. */
17559 case T2_OPCODE_AND:
17560 new_inst = T2_OPCODE_BIC;
17561 if (rd == 15)
17562 value = FAIL;
17563 else
17564 value = inverted;
17565 break;
17566
17567 case T2_OPCODE_BIC:
17568 new_inst = T2_OPCODE_AND;
17569 value = inverted;
17570 break;
17571
17572 /* ADC <-> SBC */
17573 case T2_OPCODE_ADC:
17574 new_inst = T2_OPCODE_SBC;
17575 value = inverted;
17576 break;
17577
17578 case T2_OPCODE_SBC:
17579 new_inst = T2_OPCODE_ADC;
17580 value = inverted;
17581 break;
17582
17583 /* We cannot do anything. */
17584 default:
17585 return FAIL;
17586 }
17587
17588 if (value == (unsigned int)FAIL)
17589 return FAIL;
17590
17591 *instruction &= T2_OPCODE_MASK;
17592 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17593 return value;
17594 }
17595
17596 /* Read a 32-bit thumb instruction from buf. */
17597 static unsigned long
17598 get_thumb32_insn (char * buf)
17599 {
17600 unsigned long insn;
17601 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17602 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17603
17604 return insn;
17605 }
17606
17607
17608 /* We usually want to set the low bit on the address of thumb function
17609 symbols. In particular .word foo - . should have the low bit set.
17610 Generic code tries to fold the difference of two symbols to
17611 a constant. Prevent this and force a relocation when the first symbols
17612 is a thumb function. */
17613 int
17614 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17615 {
17616 if (op == O_subtract
17617 && l->X_op == O_symbol
17618 && r->X_op == O_symbol
17619 && THUMB_IS_FUNC (l->X_add_symbol))
17620 {
17621 l->X_op = O_subtract;
17622 l->X_op_symbol = r->X_add_symbol;
17623 l->X_add_number -= r->X_add_number;
17624 return 1;
17625 }
17626 /* Process as normal. */
17627 return 0;
17628 }
17629
17630 void
17631 md_apply_fix (fixS * fixP,
17632 valueT * valP,
17633 segT seg)
17634 {
17635 offsetT value = * valP;
17636 offsetT newval;
17637 unsigned int newimm;
17638 unsigned long temp;
17639 int sign;
17640 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17641
17642 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17643
17644 /* Note whether this will delete the relocation. */
17645
17646 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17647 fixP->fx_done = 1;
17648
17649 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17650 consistency with the behavior on 32-bit hosts. Remember value
17651 for emit_reloc. */
17652 value &= 0xffffffff;
17653 value ^= 0x80000000;
17654 value -= 0x80000000;
17655
17656 *valP = value;
17657 fixP->fx_addnumber = value;
17658
17659 /* Same treatment for fixP->fx_offset. */
17660 fixP->fx_offset &= 0xffffffff;
17661 fixP->fx_offset ^= 0x80000000;
17662 fixP->fx_offset -= 0x80000000;
17663
17664 switch (fixP->fx_r_type)
17665 {
17666 case BFD_RELOC_NONE:
17667 /* This will need to go in the object file. */
17668 fixP->fx_done = 0;
17669 break;
17670
17671 case BFD_RELOC_ARM_IMMEDIATE:
17672 /* We claim that this fixup has been processed here,
17673 even if in fact we generate an error because we do
17674 not have a reloc for it, so tc_gen_reloc will reject it. */
17675 fixP->fx_done = 1;
17676
17677 if (fixP->fx_addsy
17678 && ! S_IS_DEFINED (fixP->fx_addsy))
17679 {
17680 as_bad_where (fixP->fx_file, fixP->fx_line,
17681 _("undefined symbol %s used as an immediate value"),
17682 S_GET_NAME (fixP->fx_addsy));
17683 break;
17684 }
17685
17686 newimm = encode_arm_immediate (value);
17687 temp = md_chars_to_number (buf, INSN_SIZE);
17688
17689 /* If the instruction will fail, see if we can fix things up by
17690 changing the opcode. */
17691 if (newimm == (unsigned int) FAIL
17692 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17693 {
17694 as_bad_where (fixP->fx_file, fixP->fx_line,
17695 _("invalid constant (%lx) after fixup"),
17696 (unsigned long) value);
17697 break;
17698 }
17699
17700 newimm |= (temp & 0xfffff000);
17701 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17702 break;
17703
17704 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17705 {
17706 unsigned int highpart = 0;
17707 unsigned int newinsn = 0xe1a00000; /* nop. */
17708
17709 newimm = encode_arm_immediate (value);
17710 temp = md_chars_to_number (buf, INSN_SIZE);
17711
17712 /* If the instruction will fail, see if we can fix things up by
17713 changing the opcode. */
17714 if (newimm == (unsigned int) FAIL
17715 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17716 {
17717 /* No ? OK - try using two ADD instructions to generate
17718 the value. */
17719 newimm = validate_immediate_twopart (value, & highpart);
17720
17721 /* Yes - then make sure that the second instruction is
17722 also an add. */
17723 if (newimm != (unsigned int) FAIL)
17724 newinsn = temp;
17725 /* Still No ? Try using a negated value. */
17726 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17727 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17728 /* Otherwise - give up. */
17729 else
17730 {
17731 as_bad_where (fixP->fx_file, fixP->fx_line,
17732 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17733 (long) value);
17734 break;
17735 }
17736
17737 /* Replace the first operand in the 2nd instruction (which
17738 is the PC) with the destination register. We have
17739 already added in the PC in the first instruction and we
17740 do not want to do it again. */
17741 newinsn &= ~ 0xf0000;
17742 newinsn |= ((newinsn & 0x0f000) << 4);
17743 }
17744
17745 newimm |= (temp & 0xfffff000);
17746 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17747
17748 highpart |= (newinsn & 0xfffff000);
17749 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17750 }
17751 break;
17752
17753 case BFD_RELOC_ARM_OFFSET_IMM:
17754 if (!fixP->fx_done && seg->use_rela_p)
17755 value = 0;
17756
17757 case BFD_RELOC_ARM_LITERAL:
17758 sign = value >= 0;
17759
17760 if (value < 0)
17761 value = - value;
17762
17763 if (validate_offset_imm (value, 0) == FAIL)
17764 {
17765 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17766 as_bad_where (fixP->fx_file, fixP->fx_line,
17767 _("invalid literal constant: pool needs to be closer"));
17768 else
17769 as_bad_where (fixP->fx_file, fixP->fx_line,
17770 _("bad immediate value for offset (%ld)"),
17771 (long) value);
17772 break;
17773 }
17774
17775 newval = md_chars_to_number (buf, INSN_SIZE);
17776 newval &= 0xff7ff000;
17777 newval |= value | (sign ? INDEX_UP : 0);
17778 md_number_to_chars (buf, newval, INSN_SIZE);
17779 break;
17780
17781 case BFD_RELOC_ARM_OFFSET_IMM8:
17782 case BFD_RELOC_ARM_HWLITERAL:
17783 sign = value >= 0;
17784
17785 if (value < 0)
17786 value = - value;
17787
17788 if (validate_offset_imm (value, 1) == FAIL)
17789 {
17790 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17791 as_bad_where (fixP->fx_file, fixP->fx_line,
17792 _("invalid literal constant: pool needs to be closer"));
17793 else
17794 as_bad (_("bad immediate value for half-word offset (%ld)"),
17795 (long) value);
17796 break;
17797 }
17798
17799 newval = md_chars_to_number (buf, INSN_SIZE);
17800 newval &= 0xff7ff0f0;
17801 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17802 md_number_to_chars (buf, newval, INSN_SIZE);
17803 break;
17804
17805 case BFD_RELOC_ARM_T32_OFFSET_U8:
17806 if (value < 0 || value > 1020 || value % 4 != 0)
17807 as_bad_where (fixP->fx_file, fixP->fx_line,
17808 _("bad immediate value for offset (%ld)"), (long) value);
17809 value /= 4;
17810
17811 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17812 newval |= value;
17813 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17814 break;
17815
17816 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17817 /* This is a complicated relocation used for all varieties of Thumb32
17818 load/store instruction with immediate offset:
17819
17820 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17821 *4, optional writeback(W)
17822 (doubleword load/store)
17823
17824 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17825 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17826 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17827 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17828 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17829
17830 Uppercase letters indicate bits that are already encoded at
17831 this point. Lowercase letters are our problem. For the
17832 second block of instructions, the secondary opcode nybble
17833 (bits 8..11) is present, and bit 23 is zero, even if this is
17834 a PC-relative operation. */
17835 newval = md_chars_to_number (buf, THUMB_SIZE);
17836 newval <<= 16;
17837 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17838
17839 if ((newval & 0xf0000000) == 0xe0000000)
17840 {
17841 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17842 if (value >= 0)
17843 newval |= (1 << 23);
17844 else
17845 value = -value;
17846 if (value % 4 != 0)
17847 {
17848 as_bad_where (fixP->fx_file, fixP->fx_line,
17849 _("offset not a multiple of 4"));
17850 break;
17851 }
17852 value /= 4;
17853 if (value > 0xff)
17854 {
17855 as_bad_where (fixP->fx_file, fixP->fx_line,
17856 _("offset out of range"));
17857 break;
17858 }
17859 newval &= ~0xff;
17860 }
17861 else if ((newval & 0x000f0000) == 0x000f0000)
17862 {
17863 /* PC-relative, 12-bit offset. */
17864 if (value >= 0)
17865 newval |= (1 << 23);
17866 else
17867 value = -value;
17868 if (value > 0xfff)
17869 {
17870 as_bad_where (fixP->fx_file, fixP->fx_line,
17871 _("offset out of range"));
17872 break;
17873 }
17874 newval &= ~0xfff;
17875 }
17876 else if ((newval & 0x00000100) == 0x00000100)
17877 {
17878 /* Writeback: 8-bit, +/- offset. */
17879 if (value >= 0)
17880 newval |= (1 << 9);
17881 else
17882 value = -value;
17883 if (value > 0xff)
17884 {
17885 as_bad_where (fixP->fx_file, fixP->fx_line,
17886 _("offset out of range"));
17887 break;
17888 }
17889 newval &= ~0xff;
17890 }
17891 else if ((newval & 0x00000f00) == 0x00000e00)
17892 {
17893 /* T-instruction: positive 8-bit offset. */
17894 if (value < 0 || value > 0xff)
17895 {
17896 as_bad_where (fixP->fx_file, fixP->fx_line,
17897 _("offset out of range"));
17898 break;
17899 }
17900 newval &= ~0xff;
17901 newval |= value;
17902 }
17903 else
17904 {
17905 /* Positive 12-bit or negative 8-bit offset. */
17906 int limit;
17907 if (value >= 0)
17908 {
17909 newval |= (1 << 23);
17910 limit = 0xfff;
17911 }
17912 else
17913 {
17914 value = -value;
17915 limit = 0xff;
17916 }
17917 if (value > limit)
17918 {
17919 as_bad_where (fixP->fx_file, fixP->fx_line,
17920 _("offset out of range"));
17921 break;
17922 }
17923 newval &= ~limit;
17924 }
17925
17926 newval |= value;
17927 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17928 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17929 break;
17930
17931 case BFD_RELOC_ARM_SHIFT_IMM:
17932 newval = md_chars_to_number (buf, INSN_SIZE);
17933 if (((unsigned long) value) > 32
17934 || (value == 32
17935 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17936 {
17937 as_bad_where (fixP->fx_file, fixP->fx_line,
17938 _("shift expression is too large"));
17939 break;
17940 }
17941
17942 if (value == 0)
17943 /* Shifts of zero must be done as lsl. */
17944 newval &= ~0x60;
17945 else if (value == 32)
17946 value = 0;
17947 newval &= 0xfffff07f;
17948 newval |= (value & 0x1f) << 7;
17949 md_number_to_chars (buf, newval, INSN_SIZE);
17950 break;
17951
17952 case BFD_RELOC_ARM_T32_IMMEDIATE:
17953 case BFD_RELOC_ARM_T32_ADD_IMM:
17954 case BFD_RELOC_ARM_T32_IMM12:
17955 case BFD_RELOC_ARM_T32_ADD_PC12:
17956 /* We claim that this fixup has been processed here,
17957 even if in fact we generate an error because we do
17958 not have a reloc for it, so tc_gen_reloc will reject it. */
17959 fixP->fx_done = 1;
17960
17961 if (fixP->fx_addsy
17962 && ! S_IS_DEFINED (fixP->fx_addsy))
17963 {
17964 as_bad_where (fixP->fx_file, fixP->fx_line,
17965 _("undefined symbol %s used as an immediate value"),
17966 S_GET_NAME (fixP->fx_addsy));
17967 break;
17968 }
17969
17970 newval = md_chars_to_number (buf, THUMB_SIZE);
17971 newval <<= 16;
17972 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
17973
17974 newimm = FAIL;
17975 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17976 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17977 {
17978 newimm = encode_thumb32_immediate (value);
17979 if (newimm == (unsigned int) FAIL)
17980 newimm = thumb32_negate_data_op (&newval, value);
17981 }
17982 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
17983 && newimm == (unsigned int) FAIL)
17984 {
	  /* Turn add/sub into addw/subw.  */
17986 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17987 newval = (newval & 0xfeffffff) | 0x02000000;
17988
17989 /* 12 bit immediate for addw/subw. */
17990 if (value < 0)
17991 {
17992 value = -value;
17993 newval ^= 0x00a00000;
17994 }
17995 if (value > 0xfff)
17996 newimm = (unsigned int) FAIL;
17997 else
17998 newimm = value;
17999 }
18000
18001 if (newimm == (unsigned int)FAIL)
18002 {
18003 as_bad_where (fixP->fx_file, fixP->fx_line,
18004 _("invalid constant (%lx) after fixup"),
18005 (unsigned long) value);
18006 break;
18007 }
18008
18009 newval |= (newimm & 0x800) << 15;
18010 newval |= (newimm & 0x700) << 4;
18011 newval |= (newimm & 0x0ff);
18012
18013 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18014 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18015 break;
18016
18017 case BFD_RELOC_ARM_SMC:
18018 if (((unsigned long) value) > 0xffff)
18019 as_bad_where (fixP->fx_file, fixP->fx_line,
18020 _("invalid smc expression"));
18021 newval = md_chars_to_number (buf, INSN_SIZE);
18022 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18023 md_number_to_chars (buf, newval, INSN_SIZE);
18024 break;
18025
18026 case BFD_RELOC_ARM_SWI:
18027 if (fixP->tc_fix_data != 0)
18028 {
18029 if (((unsigned long) value) > 0xff)
18030 as_bad_where (fixP->fx_file, fixP->fx_line,
18031 _("invalid swi expression"));
18032 newval = md_chars_to_number (buf, THUMB_SIZE);
18033 newval |= value;
18034 md_number_to_chars (buf, newval, THUMB_SIZE);
18035 }
18036 else
18037 {
18038 if (((unsigned long) value) > 0x00ffffff)
18039 as_bad_where (fixP->fx_file, fixP->fx_line,
18040 _("invalid swi expression"));
18041 newval = md_chars_to_number (buf, INSN_SIZE);
18042 newval |= value;
18043 md_number_to_chars (buf, newval, INSN_SIZE);
18044 }
18045 break;
18046
18047 case BFD_RELOC_ARM_MULTI:
18048 if (((unsigned long) value) > 0xffff)
18049 as_bad_where (fixP->fx_file, fixP->fx_line,
18050 _("invalid expression in load/store multiple"));
18051 newval = value | md_chars_to_number (buf, INSN_SIZE);
18052 md_number_to_chars (buf, newval, INSN_SIZE);
18053 break;
18054
18055 #ifdef OBJ_ELF
18056 case BFD_RELOC_ARM_PCREL_CALL:
18057 newval = md_chars_to_number (buf, INSN_SIZE);
18058 if ((newval & 0xf0000000) == 0xf0000000)
18059 temp = 1;
18060 else
18061 temp = 3;
18062 goto arm_branch_common;
18063
18064 case BFD_RELOC_ARM_PCREL_JUMP:
18065 case BFD_RELOC_ARM_PLT32:
18066 #endif
18067 case BFD_RELOC_ARM_PCREL_BRANCH:
18068 temp = 3;
18069 goto arm_branch_common;
18070
18071 case BFD_RELOC_ARM_PCREL_BLX:
18072 temp = 1;
18073 arm_branch_common:
18074 /* We are going to store value (shifted right by two) in the
18075 instruction, in a 24 bit, signed field. Bits 26 through 32 either
18076 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
	 also be clear.  */
18078 if (value & temp)
18079 as_bad_where (fixP->fx_file, fixP->fx_line,
18080 _("misaligned branch destination"));
18081 if ((value & (offsetT)0xfe000000) != (offsetT)0
18082 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18083 as_bad_where (fixP->fx_file, fixP->fx_line,
18084 _("branch out of range"));
18085
18086 if (fixP->fx_done || !seg->use_rela_p)
18087 {
18088 newval = md_chars_to_number (buf, INSN_SIZE);
18089 newval |= (value >> 2) & 0x00ffffff;
18090 /* Set the H bit on BLX instructions. */
18091 if (temp == 1)
18092 {
18093 if (value & 2)
18094 newval |= 0x01000000;
18095 else
18096 newval &= ~0x01000000;
18097 }
18098 md_number_to_chars (buf, newval, INSN_SIZE);
18099 }
18100 break;
18101
18102 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18103 /* CBZ can only branch forward. */
18104 if (value & ~0x7e)
18105 as_bad_where (fixP->fx_file, fixP->fx_line,
18106 _("branch out of range"));
18107
18108 if (fixP->fx_done || !seg->use_rela_p)
18109 {
18110 newval = md_chars_to_number (buf, THUMB_SIZE);
18111 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18112 md_number_to_chars (buf, newval, THUMB_SIZE);
18113 }
18114 break;
18115
18116 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18117 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18118 as_bad_where (fixP->fx_file, fixP->fx_line,
18119 _("branch out of range"));
18120
18121 if (fixP->fx_done || !seg->use_rela_p)
18122 {
18123 newval = md_chars_to_number (buf, THUMB_SIZE);
18124 newval |= (value & 0x1ff) >> 1;
18125 md_number_to_chars (buf, newval, THUMB_SIZE);
18126 }
18127 break;
18128
18129 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18130 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18131 as_bad_where (fixP->fx_file, fixP->fx_line,
18132 _("branch out of range"));
18133
18134 if (fixP->fx_done || !seg->use_rela_p)
18135 {
18136 newval = md_chars_to_number (buf, THUMB_SIZE);
18137 newval |= (value & 0xfff) >> 1;
18138 md_number_to_chars (buf, newval, THUMB_SIZE);
18139 }
18140 break;
18141
18142 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18143 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18144 as_bad_where (fixP->fx_file, fixP->fx_line,
18145 _("conditional branch out of range"));
18146
18147 if (fixP->fx_done || !seg->use_rela_p)
18148 {
18149 offsetT newval2;
18150 addressT S, J1, J2, lo, hi;
18151
18152 S = (value & 0x00100000) >> 20;
18153 J2 = (value & 0x00080000) >> 19;
18154 J1 = (value & 0x00040000) >> 18;
18155 hi = (value & 0x0003f000) >> 12;
18156 lo = (value & 0x00000ffe) >> 1;
18157
18158 newval = md_chars_to_number (buf, THUMB_SIZE);
18159 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18160 newval |= (S << 10) | hi;
18161 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18162 md_number_to_chars (buf, newval, THUMB_SIZE);
18163 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18164 }
18165 break;
18166
18167 case BFD_RELOC_THUMB_PCREL_BLX:
18168 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18169 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18170 as_bad_where (fixP->fx_file, fixP->fx_line,
18171 _("branch out of range"));
18172
18173 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18174 /* For a BLX instruction, make sure that the relocation is rounded up
18175 to a word boundary. This follows the semantics of the instruction
18176 which specifies that bit 1 of the target address will come from bit
18177 1 of the base address. */
18178 value = (value + 1) & ~ 1;
18179
18180 if (fixP->fx_done || !seg->use_rela_p)
18181 {
18182 offsetT newval2;
18183
18184 newval = md_chars_to_number (buf, THUMB_SIZE);
18185 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18186 newval |= (value & 0x7fffff) >> 12;
18187 newval2 |= (value & 0xfff) >> 1;
18188 md_number_to_chars (buf, newval, THUMB_SIZE);
18189 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18190 }
18191 break;
18192
18193 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18194 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18195 as_bad_where (fixP->fx_file, fixP->fx_line,
18196 _("branch out of range"));
18197
18198 if (fixP->fx_done || !seg->use_rela_p)
18199 {
18200 offsetT newval2;
18201 addressT S, I1, I2, lo, hi;
18202
18203 S = (value & 0x01000000) >> 24;
18204 I1 = (value & 0x00800000) >> 23;
18205 I2 = (value & 0x00400000) >> 22;
18206 hi = (value & 0x003ff000) >> 12;
18207 lo = (value & 0x00000ffe) >> 1;
18208
18209 I1 = !(I1 ^ S);
18210 I2 = !(I2 ^ S);
18211
18212 newval = md_chars_to_number (buf, THUMB_SIZE);
18213 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18214 newval |= (S << 10) | hi;
18215 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18216 md_number_to_chars (buf, newval, THUMB_SIZE);
18217 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18218 }
18219 break;
18220
18221 case BFD_RELOC_8:
18222 if (fixP->fx_done || !seg->use_rela_p)
18223 md_number_to_chars (buf, value, 1);
18224 break;
18225
18226 case BFD_RELOC_16:
18227 if (fixP->fx_done || !seg->use_rela_p)
18228 md_number_to_chars (buf, value, 2);
18229 break;
18230
18231 #ifdef OBJ_ELF
18232 case BFD_RELOC_ARM_TLS_GD32:
18233 case BFD_RELOC_ARM_TLS_LE32:
18234 case BFD_RELOC_ARM_TLS_IE32:
18235 case BFD_RELOC_ARM_TLS_LDM32:
18236 case BFD_RELOC_ARM_TLS_LDO32:
18237 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18238 /* fall through */
18239
18240 case BFD_RELOC_ARM_GOT32:
18241 case BFD_RELOC_ARM_GOTOFF:
18242 case BFD_RELOC_ARM_TARGET2:
18243 if (fixP->fx_done || !seg->use_rela_p)
18244 md_number_to_chars (buf, 0, 4);
18245 break;
18246 #endif
18247
18248 case BFD_RELOC_RVA:
18249 case BFD_RELOC_32:
18250 case BFD_RELOC_ARM_TARGET1:
18251 case BFD_RELOC_ARM_ROSEGREL32:
18252 case BFD_RELOC_ARM_SBREL32:
18253 case BFD_RELOC_32_PCREL:
18254 #ifdef TE_PE
18255 case BFD_RELOC_32_SECREL:
18256 #endif
18257 if (fixP->fx_done || !seg->use_rela_p)
18258 #ifdef TE_WINCE
18259 /* For WinCE we only do this for pcrel fixups. */
18260 if (fixP->fx_done || fixP->fx_pcrel)
18261 #endif
18262 md_number_to_chars (buf, value, 4);
18263 break;
18264
18265 #ifdef OBJ_ELF
18266 case BFD_RELOC_ARM_PREL31:
18267 if (fixP->fx_done || !seg->use_rela_p)
18268 {
18269 newval = md_chars_to_number (buf, 4) & 0x80000000;
18270 if ((value ^ (value >> 1)) & 0x40000000)
18271 {
18272 as_bad_where (fixP->fx_file, fixP->fx_line,
18273 _("rel31 relocation overflow"));
18274 }
18275 newval |= value & 0x7fffffff;
18276 md_number_to_chars (buf, newval, 4);
18277 }
18278 break;
18279 #endif
18280
18281 case BFD_RELOC_ARM_CP_OFF_IMM:
18282 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18283 if (value < -1023 || value > 1023 || (value & 3))
18284 as_bad_where (fixP->fx_file, fixP->fx_line,
18285 _("co-processor offset out of range"));
18286 cp_off_common:
18287 sign = value >= 0;
18288 if (value < 0)
18289 value = -value;
18290 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18291 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18292 newval = md_chars_to_number (buf, INSN_SIZE);
18293 else
18294 newval = get_thumb32_insn (buf);
18295 newval &= 0xff7fff00;
18296 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18297 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18298 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18299 md_number_to_chars (buf, newval, INSN_SIZE);
18300 else
18301 put_thumb32_insn (buf, newval);
18302 break;
18303
18304 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18305 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18306 if (value < -255 || value > 255)
18307 as_bad_where (fixP->fx_file, fixP->fx_line,
18308 _("co-processor offset out of range"));
18309 value *= 4;
18310 goto cp_off_common;
18311
18312 case BFD_RELOC_ARM_THUMB_OFFSET:
18313 newval = md_chars_to_number (buf, THUMB_SIZE);
18314 /* Exactly what ranges, and where the offset is inserted depends
18315 on the type of instruction, we can establish this from the
18316 top 4 bits. */
18317 switch (newval >> 12)
18318 {
18319 case 4: /* PC load. */
18320 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18321 forced to zero for these loads; md_pcrel_from has already
18322 compensated for this. */
18323 if (value & 3)
18324 as_bad_where (fixP->fx_file, fixP->fx_line,
18325 _("invalid offset, target not word aligned (0x%08lX)"),
18326 (((unsigned long) fixP->fx_frag->fr_address
18327 + (unsigned long) fixP->fx_where) & ~3)
18328 + (unsigned long) value);
18329
18330 if (value & ~0x3fc)
18331 as_bad_where (fixP->fx_file, fixP->fx_line,
18332 _("invalid offset, value too big (0x%08lX)"),
18333 (long) value);
18334
18335 newval |= value >> 2;
18336 break;
18337
18338 case 9: /* SP load/store. */
18339 if (value & ~0x3fc)
18340 as_bad_where (fixP->fx_file, fixP->fx_line,
18341 _("invalid offset, value too big (0x%08lX)"),
18342 (long) value);
18343 newval |= value >> 2;
18344 break;
18345
18346 case 6: /* Word load/store. */
18347 if (value & ~0x7c)
18348 as_bad_where (fixP->fx_file, fixP->fx_line,
18349 _("invalid offset, value too big (0x%08lX)"),
18350 (long) value);
18351 newval |= value << 4; /* 6 - 2. */
18352 break;
18353
18354 case 7: /* Byte load/store. */
18355 if (value & ~0x1f)
18356 as_bad_where (fixP->fx_file, fixP->fx_line,
18357 _("invalid offset, value too big (0x%08lX)"),
18358 (long) value);
18359 newval |= value << 6;
18360 break;
18361
18362 case 8: /* Halfword load/store. */
18363 if (value & ~0x3e)
18364 as_bad_where (fixP->fx_file, fixP->fx_line,
18365 _("invalid offset, value too big (0x%08lX)"),
18366 (long) value);
18367 newval |= value << 5; /* 6 - 1. */
18368 break;
18369
18370 default:
18371 as_bad_where (fixP->fx_file, fixP->fx_line,
18372 "Unable to process relocation for thumb opcode: %lx",
18373 (unsigned long) newval);
18374 break;
18375 }
18376 md_number_to_chars (buf, newval, THUMB_SIZE);
18377 break;
18378
18379 case BFD_RELOC_ARM_THUMB_ADD:
18380 /* This is a complicated relocation, since we use it for all of
18381 the following immediate relocations:
18382
18383 3bit ADD/SUB
18384 8bit ADD/SUB
18385 9bit ADD/SUB SP word-aligned
18386 10bit ADD PC/SP word-aligned
18387
18388 The type of instruction being processed is encoded in the
18389 instruction field:
18390
18391 0x8000 SUB
18392 0x00F0 Rd
18393 0x000F Rs
18394 */
18395 newval = md_chars_to_number (buf, THUMB_SIZE);
18396 {
18397 int rd = (newval >> 4) & 0xf;
18398 int rs = newval & 0xf;
18399 int subtract = !!(newval & 0x8000);
18400
18401 /* Check for HI regs, only very restricted cases allowed:
18402 Adjusting SP, and using PC or SP to get an address. */
18403 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18404 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18405 as_bad_where (fixP->fx_file, fixP->fx_line,
18406 _("invalid Hi register with immediate"));
18407
18408 /* If value is negative, choose the opposite instruction. */
18409 if (value < 0)
18410 {
18411 value = -value;
18412 subtract = !subtract;
18413 if (value < 0)
18414 as_bad_where (fixP->fx_file, fixP->fx_line,
18415 _("immediate value out of range"));
18416 }
18417
18418 if (rd == REG_SP)
18419 {
18420 if (value & ~0x1fc)
18421 as_bad_where (fixP->fx_file, fixP->fx_line,
18422 _("invalid immediate for stack address calculation"));
18423 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18424 newval |= value >> 2;
18425 }
18426 else if (rs == REG_PC || rs == REG_SP)
18427 {
18428 if (subtract || value & ~0x3fc)
18429 as_bad_where (fixP->fx_file, fixP->fx_line,
18430 _("invalid immediate for address calculation (value = 0x%08lX)"),
18431 (unsigned long) value);
18432 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18433 newval |= rd << 8;
18434 newval |= value >> 2;
18435 }
18436 else if (rs == rd)
18437 {
18438 if (value & ~0xff)
18439 as_bad_where (fixP->fx_file, fixP->fx_line,
18440 _("immediate value out of range"));
18441 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18442 newval |= (rd << 8) | value;
18443 }
18444 else
18445 {
18446 if (value & ~0x7)
18447 as_bad_where (fixP->fx_file, fixP->fx_line,
18448 _("immediate value out of range"));
18449 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18450 newval |= rd | (rs << 3) | (value << 6);
18451 }
18452 }
18453 md_number_to_chars (buf, newval, THUMB_SIZE);
18454 break;
18455
18456 case BFD_RELOC_ARM_THUMB_IMM:
18457 newval = md_chars_to_number (buf, THUMB_SIZE);
18458 if (value < 0 || value > 255)
18459 as_bad_where (fixP->fx_file, fixP->fx_line,
18460 _("invalid immediate: %ld is too large"),
18461 (long) value);
18462 newval |= value;
18463 md_number_to_chars (buf, newval, THUMB_SIZE);
18464 break;
18465
18466 case BFD_RELOC_ARM_THUMB_SHIFT:
18467 /* 5bit shift value (0..32). LSL cannot take 32. */
18468 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18469 temp = newval & 0xf800;
18470 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18471 as_bad_where (fixP->fx_file, fixP->fx_line,
18472 _("invalid shift value: %ld"), (long) value);
18473 /* Shifts of zero must be encoded as LSL. */
18474 if (value == 0)
18475 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18476 /* Shifts of 32 are encoded as zero. */
18477 else if (value == 32)
18478 value = 0;
18479 newval |= value << 6;
18480 md_number_to_chars (buf, newval, THUMB_SIZE);
18481 break;
18482
18483 case BFD_RELOC_VTABLE_INHERIT:
18484 case BFD_RELOC_VTABLE_ENTRY:
18485 fixP->fx_done = 0;
18486 return;
18487
18488 case BFD_RELOC_ARM_MOVW:
18489 case BFD_RELOC_ARM_MOVT:
18490 case BFD_RELOC_ARM_THUMB_MOVW:
18491 case BFD_RELOC_ARM_THUMB_MOVT:
18492 if (fixP->fx_done || !seg->use_rela_p)
18493 {
18494 /* REL format relocations are limited to a 16-bit addend. */
18495 if (!fixP->fx_done)
18496 {
18497 if (value < -0x1000 || value > 0xffff)
18498 as_bad_where (fixP->fx_file, fixP->fx_line,
18499 _("offset too big"));
18500 }
18501 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18502 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18503 {
18504 value >>= 16;
18505 }
18506
18507 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18508 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18509 {
18510 newval = get_thumb32_insn (buf);
18511 newval &= 0xfbf08f00;
18512 newval |= (value & 0xf000) << 4;
18513 newval |= (value & 0x0800) << 15;
18514 newval |= (value & 0x0700) << 4;
18515 newval |= (value & 0x00ff);
18516 put_thumb32_insn (buf, newval);
18517 }
18518 else
18519 {
18520 newval = md_chars_to_number (buf, 4);
18521 newval &= 0xfff0f000;
18522 newval |= value & 0x0fff;
18523 newval |= (value & 0xf000) << 4;
18524 md_number_to_chars (buf, newval, 4);
18525 }
18526 }
18527 return;
18528
18529 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18530 case BFD_RELOC_ARM_ALU_PC_G0:
18531 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18532 case BFD_RELOC_ARM_ALU_PC_G1:
18533 case BFD_RELOC_ARM_ALU_PC_G2:
18534 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18535 case BFD_RELOC_ARM_ALU_SB_G0:
18536 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18537 case BFD_RELOC_ARM_ALU_SB_G1:
18538 case BFD_RELOC_ARM_ALU_SB_G2:
18539 assert (!fixP->fx_done);
18540 if (!seg->use_rela_p)
18541 {
18542 bfd_vma insn;
18543 bfd_vma encoded_addend;
18544 bfd_vma addend_abs = abs (value);
18545
18546 /* Check that the absolute value of the addend can be
18547 expressed as an 8-bit constant plus a rotation. */
18548 encoded_addend = encode_arm_immediate (addend_abs);
18549 if (encoded_addend == (unsigned int) FAIL)
18550 as_bad_where (fixP->fx_file, fixP->fx_line,
18551 _("the offset 0x%08lX is not representable"),
18552 addend_abs);
18553
18554 /* Extract the instruction. */
18555 insn = md_chars_to_number (buf, INSN_SIZE);
18556
18557 /* If the addend is positive, use an ADD instruction.
18558 Otherwise use a SUB. Take care not to destroy the S bit. */
18559 insn &= 0xff1fffff;
18560 if (value < 0)
18561 insn |= 1 << 22;
18562 else
18563 insn |= 1 << 23;
18564
18565 /* Place the encoded addend into the first 12 bits of the
18566 instruction. */
18567 insn &= 0xfffff000;
18568 insn |= encoded_addend;
18569
18570 /* Update the instruction. */
18571 md_number_to_chars (buf, insn, INSN_SIZE);
18572 }
18573 break;
18574
18575 case BFD_RELOC_ARM_LDR_PC_G0:
18576 case BFD_RELOC_ARM_LDR_PC_G1:
18577 case BFD_RELOC_ARM_LDR_PC_G2:
18578 case BFD_RELOC_ARM_LDR_SB_G0:
18579 case BFD_RELOC_ARM_LDR_SB_G1:
18580 case BFD_RELOC_ARM_LDR_SB_G2:
18581 assert (!fixP->fx_done);
18582 if (!seg->use_rela_p)
18583 {
18584 bfd_vma insn;
18585 bfd_vma addend_abs = abs (value);
18586
18587 /* Check that the absolute value of the addend can be
18588 encoded in 12 bits. */
18589 if (addend_abs >= 0x1000)
18590 as_bad_where (fixP->fx_file, fixP->fx_line,
18591 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18592 addend_abs);
18593
18594 /* Extract the instruction. */
18595 insn = md_chars_to_number (buf, INSN_SIZE);
18596
18597 /* If the addend is negative, clear bit 23 of the instruction.
18598 Otherwise set it. */
18599 if (value < 0)
18600 insn &= ~(1 << 23);
18601 else
18602 insn |= 1 << 23;
18603
18604 /* Place the absolute value of the addend into the first 12 bits
18605 of the instruction. */
18606 insn &= 0xfffff000;
18607 insn |= addend_abs;
18608
18609 /* Update the instruction. */
18610 md_number_to_chars (buf, insn, INSN_SIZE);
18611 }
18612 break;
18613
18614 case BFD_RELOC_ARM_LDRS_PC_G0:
18615 case BFD_RELOC_ARM_LDRS_PC_G1:
18616 case BFD_RELOC_ARM_LDRS_PC_G2:
18617 case BFD_RELOC_ARM_LDRS_SB_G0:
18618 case BFD_RELOC_ARM_LDRS_SB_G1:
18619 case BFD_RELOC_ARM_LDRS_SB_G2:
18620 assert (!fixP->fx_done);
18621 if (!seg->use_rela_p)
18622 {
18623 bfd_vma insn;
18624 bfd_vma addend_abs = abs (value);
18625
18626 /* Check that the absolute value of the addend can be
18627 encoded in 8 bits. */
18628 if (addend_abs >= 0x100)
18629 as_bad_where (fixP->fx_file, fixP->fx_line,
18630 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18631 addend_abs);
18632
18633 /* Extract the instruction. */
18634 insn = md_chars_to_number (buf, INSN_SIZE);
18635
18636 /* If the addend is negative, clear bit 23 of the instruction.
18637 Otherwise set it. */
18638 if (value < 0)
18639 insn &= ~(1 << 23);
18640 else
18641 insn |= 1 << 23;
18642
18643 /* Place the first four bits of the absolute value of the addend
18644 into the first 4 bits of the instruction, and the remaining
18645 four into bits 8 .. 11. */
18646 insn &= 0xfffff0f0;
18647 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18648
18649 /* Update the instruction. */
18650 md_number_to_chars (buf, insn, INSN_SIZE);
18651 }
18652 break;
18653
18654 case BFD_RELOC_ARM_LDC_PC_G0:
18655 case BFD_RELOC_ARM_LDC_PC_G1:
18656 case BFD_RELOC_ARM_LDC_PC_G2:
18657 case BFD_RELOC_ARM_LDC_SB_G0:
18658 case BFD_RELOC_ARM_LDC_SB_G1:
18659 case BFD_RELOC_ARM_LDC_SB_G2:
18660 assert (!fixP->fx_done);
18661 if (!seg->use_rela_p)
18662 {
18663 bfd_vma insn;
18664 bfd_vma addend_abs = abs (value);
18665
18666 /* Check that the absolute value of the addend is a multiple of
18667 four and, when divided by four, fits in 8 bits. */
18668 if (addend_abs & 0x3)
18669 as_bad_where (fixP->fx_file, fixP->fx_line,
18670 _("bad offset 0x%08lX (must be word-aligned)"),
18671 addend_abs);
18672
18673 if ((addend_abs >> 2) > 0xff)
18674 as_bad_where (fixP->fx_file, fixP->fx_line,
18675 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18676 addend_abs);
18677
18678 /* Extract the instruction. */
18679 insn = md_chars_to_number (buf, INSN_SIZE);
18680
18681 /* If the addend is negative, clear bit 23 of the instruction.
18682 Otherwise set it. */
18683 if (value < 0)
18684 insn &= ~(1 << 23);
18685 else
18686 insn |= 1 << 23;
18687
18688 /* Place the addend (divided by four) into the first eight
18689 bits of the instruction. */
18690 insn &= 0xfffffff0;
18691 insn |= addend_abs >> 2;
18692
18693 /* Update the instruction. */
18694 md_number_to_chars (buf, insn, INSN_SIZE);
18695 }
18696 break;
18697
18698 case BFD_RELOC_UNUSED:
18699 default:
18700 as_bad_where (fixP->fx_file, fixP->fx_line,
18701 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18702 }
18703 }
18704
18705 /* Translate internal representation of relocation info to BFD target
18706 format. */
18707
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* xmalloc aborts on allocation failure, so no NULL checks here.  */
  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Select the BFD reloc code for this fixup.  Note that the
     BFD_RELOC_8/16/32 and MOVW/MOVT cases only handle the PC-relative
     form; the non-PC-relative forms deliberately fall through until
     they reach the generic "code = fixp->fx_r_type" group below.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These internal types map directly onto the BFD code of the
       same name.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A direct or PC-relative word reference to the GOT symbol itself
     becomes a GOTPC relocation.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
18947
18948 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18949
18950 void
18951 cons_fix_new_arm (fragS * frag,
18952 int where,
18953 int size,
18954 expressionS * exp)
18955 {
18956 bfd_reloc_code_real_type type;
18957 int pcrel = 0;
18958
18959 /* Pick a reloc.
18960 FIXME: @@ Should look at CPU word size. */
18961 switch (size)
18962 {
18963 case 1:
18964 type = BFD_RELOC_8;
18965 break;
18966 case 2:
18967 type = BFD_RELOC_16;
18968 break;
18969 case 4:
18970 default:
18971 type = BFD_RELOC_32;
18972 break;
18973 case 8:
18974 type = BFD_RELOC_64;
18975 break;
18976 }
18977
18978 #ifdef TE_PE
18979 if (exp->X_op == O_secrel)
18980 {
18981 exp->X_op = O_symbol;
18982 type = BFD_RELOC_32_SECREL;
18983 }
18984 #endif
18985
18986 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
18987 }
18988
18989 #if defined OBJ_COFF || defined OBJ_ELF
18990 void
18991 arm_validate_fix (fixS * fixP)
18992 {
18993 /* If the destination of the branch is a defined symbol which does not have
18994 the THUMB_FUNC attribute, then we must be calling a function which has
18995 the (interfacearm) attribute. We look for the Thumb entry point to that
18996 function and change the branch to refer to that function instead. */
18997 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
18998 && fixP->fx_addsy != NULL
18999 && S_IS_DEFINED (fixP->fx_addsy)
19000 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19001 {
19002 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19003 }
19004 }
19005 #endif
19006
19007 int
19008 arm_force_relocation (struct fix * fixp)
19009 {
19010 #if defined (OBJ_COFF) && defined (TE_PE)
19011 if (fixp->fx_r_type == BFD_RELOC_RVA)
19012 return 1;
19013 #endif
19014
19015 /* Resolve these relocations even if the symbol is extern or weak. */
19016 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19017 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19018 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19019 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19020 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19021 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19022 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19023 return 0;
19024
19025 /* Always leave these relocations for the linker. */
19026 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19027 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19028 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19029 return 1;
19030
19031 /* Always generate relocations against function symbols. */
19032 if (fixp->fx_r_type == BFD_RELOC_32
19033 && fixp->fx_addsy
19034 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19035 return 1;
19036
19037 return generic_force_reloc (fixp);
19038 }
19039
19040 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19041 /* Relocations against function names must be left unadjusted,
19042 so that the linker can use this information to generate interworking
19043 stubs. The MIPS version of this function
19044 also prevents relocations that are mips-16 specific, but I do not
19045 know why it does this.
19046
19047 FIXME:
19048 There is one other problem that ought to be addressed here, but
19049 which currently is not: Taking the address of a label (rather
19050 than a function) and then later jumping to that address. Such
19051 addresses also ought to have their bottom bit set (assuming that
19052 they reside in Thumb code), but at the moment they will not. */
19053
19054 bfd_boolean
19055 arm_fix_adjustable (fixS * fixP)
19056 {
19057 if (fixP->fx_addsy == NULL)
19058 return 1;
19059
19060 /* Preserve relocations against symbols with function type. */
19061 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19062 return 0;
19063
19064 if (THUMB_IS_FUNC (fixP->fx_addsy)
19065 && fixP->fx_subsy == NULL)
19066 return 0;
19067
19068 /* We need the symbol name for the VTABLE entries. */
19069 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19070 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19071 return 0;
19072
19073 /* Don't allow symbols to be discarded on GOT related relocs. */
19074 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19075 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19076 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19077 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19078 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19079 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19080 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19081 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19082 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19083 return 0;
19084
19085 /* Similarly for group relocations. */
19086 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19087 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19088 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19089 return 0;
19090
19091 return 1;
19092 }
19093 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19094
19095 #ifdef OBJ_ELF
19096
19097 const char *
19098 elf32_arm_target_format (void)
19099 {
19100 #ifdef TE_SYMBIAN
19101 return (target_big_endian
19102 ? "elf32-bigarm-symbian"
19103 : "elf32-littlearm-symbian");
19104 #elif defined (TE_VXWORKS)
19105 return (target_big_endian
19106 ? "elf32-bigarm-vxworks"
19107 : "elf32-littlearm-vxworks");
19108 #else
19109 if (target_big_endian)
19110 return "elf32-bigarm";
19111 else
19112 return "elf32-littlearm";
19113 #endif
19114 }
19115
/* Per-symbol hook for ELF output; simply defers to the generic
   ELF symbol frobbing code.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
19122 #endif
19123
19124 /* MD interface: Finalization. */
19125
19126 /* A good place to do this, although this was probably not intended
19127 for this kind of use. We need to dump the literal pool before
19128 references are made to a null symbol pointer. */
19129
19130 void
19131 arm_cleanup (void)
19132 {
19133 literal_pool * pool;
19134
19135 for (pool = list_of_pools; pool; pool = pool->next)
19136 {
19137 /* Put it at the end of the relevent section. */
19138 subseg_set (pool->section, pool->sub_section);
19139 #ifdef OBJ_ELF
19140 arm_elf_change_section ();
19141 #endif
19142 s_ltorg (0);
19143 }
19144 }
19145
19146 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19147 ARM ones. */
19148
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* Rewrite the COFF storage class of every Thumb symbol so that the
     class itself records the Thumb-ness (C_THUMB* variants).  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get their native flags byte forced on.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  /* Tag Thumb symbols in the ELF symbol table, preserving the
     original binding while changing the symbol type.  Mapping
     symbols ($a/$t/$d etc.) are left untouched.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
19222
19223 /* MD interface: Initialization. */
19224
19225 static void
19226 set_constant_flonums (void)
19227 {
19228 int i;
19229
19230 for (i = 0; i < NUM_FLOAT_VALS; i++)
19231 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19232 abort ();
19233 }
19234
19235 /* Auto-select Thumb mode if it's the only available instruction set for the
19236 given architecture. */
19237
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the selected CPU lacks the arm_ext_v1 feature (presumably the
     base ARM 32-bit instruction set — confirm against opcode/arm.h),
     switch the assembler into 16-bit (Thumb) mode.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
19244
19245 void
19246 md_begin (void)
19247 {
19248 unsigned mach;
19249 unsigned int i;
19250
19251 if ( (arm_ops_hsh = hash_new ()) == NULL
19252 || (arm_cond_hsh = hash_new ()) == NULL
19253 || (arm_shift_hsh = hash_new ()) == NULL
19254 || (arm_psr_hsh = hash_new ()) == NULL
19255 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19256 || (arm_reg_hsh = hash_new ()) == NULL
19257 || (arm_reloc_hsh = hash_new ()) == NULL
19258 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19259 as_fatal (_("virtual memory exhausted"));
19260
19261 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19262 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19263 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19264 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19265 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19266 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19267 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19268 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19269 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19270 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19271 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19272 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19273 for (i = 0;
19274 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19275 i++)
19276 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19277 (PTR) (barrier_opt_names + i));
19278 #ifdef OBJ_ELF
19279 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19280 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19281 #endif
19282
19283 set_constant_flonums ();
19284
19285 /* Set the cpu variant based on the command-line options. We prefer
19286 -mcpu= over -march= if both are set (as for GCC); and we prefer
19287 -mfpu= over any other way of setting the floating point unit.
19288 Use of legacy options with new options are faulted. */
19289 if (legacy_cpu)
19290 {
19291 if (mcpu_cpu_opt || march_cpu_opt)
19292 as_bad (_("use of old and new-style options to set CPU type"));
19293
19294 mcpu_cpu_opt = legacy_cpu;
19295 }
19296 else if (!mcpu_cpu_opt)
19297 mcpu_cpu_opt = march_cpu_opt;
19298
19299 if (legacy_fpu)
19300 {
19301 if (mfpu_opt)
19302 as_bad (_("use of old and new-style options to set FPU type"));
19303
19304 mfpu_opt = legacy_fpu;
19305 }
19306 else if (!mfpu_opt)
19307 {
19308 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19309 /* Some environments specify a default FPU. If they don't, infer it
19310 from the processor. */
19311 if (mcpu_fpu_opt)
19312 mfpu_opt = mcpu_fpu_opt;
19313 else
19314 mfpu_opt = march_fpu_opt;
19315 #else
19316 mfpu_opt = &fpu_default;
19317 #endif
19318 }
19319
19320 if (!mfpu_opt)
19321 {
19322 if (mcpu_cpu_opt != NULL)
19323 mfpu_opt = &fpu_default;
19324 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19325 mfpu_opt = &fpu_arch_vfp_v2;
19326 else
19327 mfpu_opt = &fpu_arch_fpa;
19328 }
19329
19330 #ifdef CPU_DEFAULT
19331 if (!mcpu_cpu_opt)
19332 {
19333 mcpu_cpu_opt = &cpu_default;
19334 selected_cpu = cpu_default;
19335 }
19336 #else
19337 if (mcpu_cpu_opt)
19338 selected_cpu = *mcpu_cpu_opt;
19339 else
19340 mcpu_cpu_opt = &arm_arch_any;
19341 #endif
19342
19343 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19344
19345 autoselect_thumb_from_cpu_variant ();
19346
19347 arm_arch_used = thumb_arch_used = arm_arch_none;
19348
19349 #if defined OBJ_COFF || defined OBJ_ELF
19350 {
19351 unsigned int flags = 0;
19352
19353 #if defined OBJ_ELF
19354 flags = meabi_flags;
19355
19356 switch (meabi_flags)
19357 {
19358 case EF_ARM_EABI_UNKNOWN:
19359 #endif
19360 /* Set the flags in the private structure. */
19361 if (uses_apcs_26) flags |= F_APCS26;
19362 if (support_interwork) flags |= F_INTERWORK;
19363 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19364 if (pic_code) flags |= F_PIC;
19365 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19366 flags |= F_SOFT_FLOAT;
19367
19368 switch (mfloat_abi_opt)
19369 {
19370 case ARM_FLOAT_ABI_SOFT:
19371 case ARM_FLOAT_ABI_SOFTFP:
19372 flags |= F_SOFT_FLOAT;
19373 break;
19374
19375 case ARM_FLOAT_ABI_HARD:
19376 if (flags & F_SOFT_FLOAT)
19377 as_bad (_("hard-float conflicts with specified fpu"));
19378 break;
19379 }
19380
19381 /* Using pure-endian doubles (even if soft-float). */
19382 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19383 flags |= F_VFP_FLOAT;
19384
19385 #if defined OBJ_ELF
19386 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19387 flags |= EF_ARM_MAVERICK_FLOAT;
19388 break;
19389
19390 case EF_ARM_EABI_VER4:
19391 case EF_ARM_EABI_VER5:
19392 /* No additional flags to set. */
19393 break;
19394
19395 default:
19396 abort ();
19397 }
19398 #endif
19399 bfd_set_private_flags (stdoutput, flags);
19400
19401 /* We have run out flags in the COFF header to encode the
19402 status of ATPCS support, so instead we create a dummy,
19403 empty, debug section called .arm.atpcs. */
19404 if (atpcs)
19405 {
19406 asection * sec;
19407
19408 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19409
19410 if (sec != NULL)
19411 {
19412 bfd_set_section_flags
19413 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19414 bfd_set_section_size (stdoutput, sec, 0);
19415 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19416 }
19417 }
19418 }
19419 #endif
19420
19421 /* Record the CPU type as well. */
19422 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19423 mach = bfd_mach_arm_iWMMXt2;
19424 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19425 mach = bfd_mach_arm_iWMMXt;
19426 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19427 mach = bfd_mach_arm_XScale;
19428 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19429 mach = bfd_mach_arm_ep9312;
19430 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19431 mach = bfd_mach_arm_5TE;
19432 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19433 {
19434 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19435 mach = bfd_mach_arm_5T;
19436 else
19437 mach = bfd_mach_arm_5;
19438 }
19439 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19440 {
19441 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19442 mach = bfd_mach_arm_4T;
19443 else
19444 mach = bfd_mach_arm_4;
19445 }
19446 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19447 mach = bfd_mach_arm_3M;
19448 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19449 mach = bfd_mach_arm_3;
19450 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19451 mach = bfd_mach_arm_2a;
19452 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19453 mach = bfd_mach_arm_2;
19454 else
19455 mach = bfd_mach_arm_unknown;
19456
19457 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19458 }
19459
19460 /* Command line processing. */
19461
19462 /* md_parse_option
19463 Invocation line includes a switch not recognized by the base assembler.
19464 See if it's a processor-specific option.
19465
19466 This routine is somewhat complicated by the need for backwards
19467 compatibility (since older releases of gcc can't be changed).
19468 The new options try to make the interface as compatible as
19469 possible with GCC.
19470
19471 New options (supported) are:
19472
19473 -mcpu=<cpu name> Assemble for selected processor
19474 -march=<architecture name> Assemble for selected architecture
19475 -mfpu=<fpu architecture> Assemble for selected FPU.
19476 -EB/-mbig-endian Big-endian
19477 -EL/-mlittle-endian Little-endian
19478 -k Generate PIC code
19479 -mthumb Start in Thumb mode
19480 -mthumb-interwork Code supports ARM/Thumb interworking
19481
19482 For now we will also provide support for:
19483
19484 -mapcs-32 32-bit Program counter
19485 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
19487 -mapcs-reentrant Reentrant code
19488 -matpcs
19489 (sometime these will probably be replaced with -mapcs=<list of options>
19490 and -matpcs=<list of options>)
19491
   The remaining options are only supported for backwards compatibility.
19493 Cpu variants, the arm part is optional:
19494 -m[arm]1 Currently not supported.
19495 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19496 -m[arm]3 Arm 3 processor
19497 -m[arm]6[xx], Arm 6 processors
19498 -m[arm]7[xx][t][[d]m] Arm 7 processors
19499 -m[arm]8[10] Arm 8 processors
19500 -m[arm]9[20][tdmi] Arm 9 processors
19501 -mstrongarm[110[0]] StrongARM processors
19502 -mxscale XScale processors
19503 -m[arm]v[2345[t[e]]] Arm architectures
19504 -mall All (except the ARM1)
19505 FP variants:
19506 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19507 -mfpe-old (No float load/store multiples)
19508 -mvfpxd VFP Single precision
19509 -mvfp All VFP
19510 -mno-fpu Disable all floating point instructions
19511
19512 The following CPU names are recognized:
19513 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19514 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19515 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
19516 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19517 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19518 arm10t arm10e, arm1020t, arm1020e, arm10200e,
19519 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19520
19521 */
19522
/* Single-character options: "m" takes an argument (the sub-option string
   parsed by md_parse_option); "k" requests PIC code.  */
const char * md_shortopts = "m:k";

/* Define OPTION_EB and/or OPTION_EL depending on which endiannesses the
   target configuration supports.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options handled directly in md_parse_option's switch; everything
   else is looked up in the arm_opts/arm_legacy_opts/arm_long_opts tables.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
19548
/* A simple boolean command-line option: matching it stores VALUE into
   *VAR (when VAR is non-null).  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.	*/
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
19579
/* A deprecated-style CPU/FPU option: matching it points *VAR at VALUE
   and (via md_parse_option) prints the DEPRECATED message suggesting the
   modern -mcpu=/-march=/-mfpu= equivalent.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.	*/
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
19700
/* An entry in the -mcpu= processor table.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name accepted on the command line.  */
  const arm_feature_set value;	/* Feature set implied by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
19712
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Searched linearly by arm_parse_cpu.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE(0, FPU_VFP_V3
						     | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312",	ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
19809
/* An entry in the -march= architecture table.	*/
struct arm_arch_option_table
{
  char *name;			/* Architecture name accepted on the command line.  */
  const arm_feature_set value;	/* Feature set implied by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed if -mfpu= is not given.	*/
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Searched linearly by arm_parse_arch.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.	*/
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
19861
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;			/* Name accepted on the command line.  */
  const arm_feature_set value;	/* Feature bits to merge in.  */
};

/* Extensions accepted after '+' in -mcpu=/-march= strings; see
   arm_parse_extension.	 */
static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};
19877
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Searched by arm_parse_fpu for -mfpu=.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3},
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL,		ARM_ARCH_NONE}
};
19906
/* A name -> plain integer value mapping, used for the float-ABI and
   EABI-version option tables below.  */
struct arm_option_value_table
{
  char *name;	/* Name accepted on the command line.  */
  long value;	/* Value stored when the name matches.	*/
};

/* -mfloat-abi= values; stored into mfloat_abi_opt.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
19920
19921 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.	*/
/* -meabi= values; stored into meabi_flags.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
19930 #endif
19931
/* An option of the form "-m<prefix>=<subopt>": OPTION is matched as a
   prefix of the argument and FUNC is called on the remainder.	*/
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
19939
19940 static int
19941 arm_parse_extension (char * str, const arm_feature_set **opt_p)
19942 {
19943 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
19944
19945 /* Copy the feature set, so that we can modify it. */
19946 *ext_set = **opt_p;
19947 *opt_p = ext_set;
19948
19949 while (str != NULL && *str != 0)
19950 {
19951 const struct arm_option_cpu_value_table * opt;
19952 char * ext;
19953 int optlen;
19954
19955 if (*str != '+')
19956 {
19957 as_bad (_("invalid architectural extension"));
19958 return 0;
19959 }
19960
19961 str++;
19962 ext = strchr (str, '+');
19963
19964 if (ext != NULL)
19965 optlen = ext - str;
19966 else
19967 optlen = strlen (str);
19968
19969 if (optlen == 0)
19970 {
19971 as_bad (_("missing architectural extension"));
19972 return 0;
19973 }
19974
19975 for (opt = arm_extensions; opt->name != NULL; opt++)
19976 if (strncmp (opt->name, str, optlen) == 0)
19977 {
19978 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
19979 break;
19980 }
19981
19982 if (opt->name == NULL)
19983 {
19984 as_bad (_("unknown architectural extnsion `%s'"), str);
19985 return 0;
19986 }
19987
19988 str = ext;
19989 };
19990
19991 return 1;
19992 }
19993
19994 static int
19995 arm_parse_cpu (char * str)
19996 {
19997 const struct arm_cpu_option_table * opt;
19998 char * ext = strchr (str, '+');
19999 int optlen;
20000
20001 if (ext != NULL)
20002 optlen = ext - str;
20003 else
20004 optlen = strlen (str);
20005
20006 if (optlen == 0)
20007 {
20008 as_bad (_("missing cpu name `%s'"), str);
20009 return 0;
20010 }
20011
20012 for (opt = arm_cpus; opt->name != NULL; opt++)
20013 if (strncmp (opt->name, str, optlen) == 0)
20014 {
20015 mcpu_cpu_opt = &opt->value;
20016 mcpu_fpu_opt = &opt->default_fpu;
20017 if (opt->canonical_name)
20018 strcpy(selected_cpu_name, opt->canonical_name);
20019 else
20020 {
20021 int i;
20022 for (i = 0; i < optlen; i++)
20023 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20024 selected_cpu_name[i] = 0;
20025 }
20026
20027 if (ext != NULL)
20028 return arm_parse_extension (ext, &mcpu_cpu_opt);
20029
20030 return 1;
20031 }
20032
20033 as_bad (_("unknown cpu `%s'"), str);
20034 return 0;
20035 }
20036
20037 static int
20038 arm_parse_arch (char * str)
20039 {
20040 const struct arm_arch_option_table *opt;
20041 char *ext = strchr (str, '+');
20042 int optlen;
20043
20044 if (ext != NULL)
20045 optlen = ext - str;
20046 else
20047 optlen = strlen (str);
20048
20049 if (optlen == 0)
20050 {
20051 as_bad (_("missing architecture name `%s'"), str);
20052 return 0;
20053 }
20054
20055 for (opt = arm_archs; opt->name != NULL; opt++)
20056 if (streq (opt->name, str))
20057 {
20058 march_cpu_opt = &opt->value;
20059 march_fpu_opt = &opt->default_fpu;
20060 strcpy(selected_cpu_name, opt->name);
20061
20062 if (ext != NULL)
20063 return arm_parse_extension (ext, &march_cpu_opt);
20064
20065 return 1;
20066 }
20067
20068 as_bad (_("unknown architecture `%s'\n"), str);
20069 return 0;
20070 }
20071
20072 static int
20073 arm_parse_fpu (char * str)
20074 {
20075 const struct arm_option_cpu_value_table * opt;
20076
20077 for (opt = arm_fpus; opt->name != NULL; opt++)
20078 if (streq (opt->name, str))
20079 {
20080 mfpu_opt = &opt->value;
20081 return 1;
20082 }
20083
20084 as_bad (_("unknown floating point format `%s'\n"), str);
20085 return 0;
20086 }
20087
20088 static int
20089 arm_parse_float_abi (char * str)
20090 {
20091 const struct arm_option_value_table * opt;
20092
20093 for (opt = arm_float_abis; opt->name != NULL; opt++)
20094 if (streq (opt->name, str))
20095 {
20096 mfloat_abi_opt = opt->value;
20097 return 1;
20098 }
20099
20100 as_bad (_("unknown floating point abi `%s'\n"), str);
20101 return 0;
20102 }
20103
20104 #ifdef OBJ_ELF
20105 static int
20106 arm_parse_eabi (char * str)
20107 {
20108 const struct arm_option_value_table *opt;
20109
20110 for (opt = arm_eabis; opt->name != NULL; opt++)
20111 if (streq (opt->name, str))
20112 {
20113 meabi_flags = opt->value;
20114 return 1;
20115 }
20116 as_bad (_("unknown EABI `%s'\n"), str);
20117 return 0;
20118 }
20119 #endif
20120
/* Prefix-matched "-m<opt>=<arg>" options; the remainder of the argument
   is passed to the listed parser function.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
20137
/* gas hook: handle the target-specific command-line option C (with
   argument ARG, or NULL).  Endianness selectors are handled directly;
   anything else is looked up, in order, in arm_opts (simple flags),
   arm_legacy_opts (deprecated CPU/FPU spellings) and arm_long_opts
   ("-mxxx=" sub-option parsers).  Returns 1 if the option was consumed,
   0 if it is not ours.	 */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple boolean options: match "-<char><rest>" exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy CPU/FPU spellings; these set a feature-set pointer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* "-mxxx=yyy" options: match the "mxxx=" prefix, then hand the
	 remainder to the table's parser function.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
20230
20231 void
20232 md_show_usage (FILE * fp)
20233 {
20234 struct arm_option_table *opt;
20235 struct arm_long_option_table *lopt;
20236
20237 fprintf (fp, _(" ARM-specific assembler options:\n"));
20238
20239 for (opt = arm_opts; opt->option != NULL; opt++)
20240 if (opt->help != NULL)
20241 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20242
20243 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20244 if (lopt->help != NULL)
20245 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20246
20247 #ifdef OPTION_EB
20248 fprintf (fp, _("\
20249 -EB assemble code for a big-endian cpu\n"));
20250 #endif
20251
20252 #ifdef OPTION_EL
20253 fprintf (fp, _("\
20254 -EL assemble code for a little-endian cpu\n"));
20255 #endif
20256 }
20257
20258
20259 #ifdef OBJ_ELF
/* An entry associating a feature set with the integer value used for
   the EABI Tag_CPU_arch build attribute.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
20265
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  An entry with val == 0 terminates the table.
   Note several v7 profiles share the arch value 10.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {7, ARM_ARCH_V6Z},
  {8, ARM_ARCH_V6K},
  {9, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7A},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {0, ARM_ARCH_NONE}
};
20284
/* Set the public EABI object attributes, based on the feature sets
   accumulated during assembly and any explicit .cpu/.arch/.fpu/
   .object_arch directives or command line options.  Called from
   arm_md_end before the attribute section contents are sized.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Walk the sorted arch table, clearing matched features as we go;
     the last entry whose features are all present wins.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      /* For "armvN" style names, report only the part after "armv",
	 upper-cased (e.g. "armv5te" becomes "5TE").  */
      if (strncmp(p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
    }
  /* Tag_CPU_arch.  */
  elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
  /* Tag_THUMB_ISA_use: 2 when Thumb-2 instructions were used,
     1 for original 16-bit Thumb.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 9,
	ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch: highest VFP version used in either instruction set;
     VFPv1xD also counts as version 1.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
}
20370
20371 /* Add the .ARM.attributes section. */
20372 void
20373 arm_md_end (void)
20374 {
20375 segT s;
20376 char *p;
20377 addressT addr;
20378 offsetT size;
20379
20380 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20381 return;
20382
20383 aeabi_set_public_attributes ();
20384 size = elf32_arm_eabi_attr_size (stdoutput);
20385 s = subseg_new (".ARM.attributes", 0);
20386 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20387 addr = frag_now_fix ();
20388 p = frag_more (size);
20389 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20390 }
20391 #endif /* OBJ_ELF */
20392
20393
20394 /* Parse a .cpu directive. */
20395
20396 static void
20397 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20398 {
20399 const struct arm_cpu_option_table *opt;
20400 char *name;
20401 char saved_char;
20402
20403 name = input_line_pointer;
20404 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20405 input_line_pointer++;
20406 saved_char = *input_line_pointer;
20407 *input_line_pointer = 0;
20408
20409 /* Skip the first "all" entry. */
20410 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20411 if (streq (opt->name, name))
20412 {
20413 mcpu_cpu_opt = &opt->value;
20414 selected_cpu = opt->value;
20415 if (opt->canonical_name)
20416 strcpy(selected_cpu_name, opt->canonical_name);
20417 else
20418 {
20419 int i;
20420 for (i = 0; opt->name[i]; i++)
20421 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20422 selected_cpu_name[i] = 0;
20423 }
20424 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20425 *input_line_pointer = saved_char;
20426 demand_empty_rest_of_line ();
20427 return;
20428 }
20429 as_bad (_("unknown cpu `%s'"), name);
20430 *input_line_pointer = saved_char;
20431 ignore_rest_of_line ();
20432 }
20433
20434
20435 /* Parse a .arch directive. */
20436
20437 static void
20438 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20439 {
20440 const struct arm_arch_option_table *opt;
20441 char saved_char;
20442 char *name;
20443
20444 name = input_line_pointer;
20445 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20446 input_line_pointer++;
20447 saved_char = *input_line_pointer;
20448 *input_line_pointer = 0;
20449
20450 /* Skip the first "all" entry. */
20451 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20452 if (streq (opt->name, name))
20453 {
20454 mcpu_cpu_opt = &opt->value;
20455 selected_cpu = opt->value;
20456 strcpy(selected_cpu_name, opt->name);
20457 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20458 *input_line_pointer = saved_char;
20459 demand_empty_rest_of_line ();
20460 return;
20461 }
20462
20463 as_bad (_("unknown architecture `%s'\n"), name);
20464 *input_line_pointer = saved_char;
20465 ignore_rest_of_line ();
20466 }
20467
20468
20469 /* Parse a .object_arch directive. */
20470
20471 static void
20472 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20473 {
20474 const struct arm_arch_option_table *opt;
20475 char saved_char;
20476 char *name;
20477
20478 name = input_line_pointer;
20479 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20480 input_line_pointer++;
20481 saved_char = *input_line_pointer;
20482 *input_line_pointer = 0;
20483
20484 /* Skip the first "all" entry. */
20485 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20486 if (streq (opt->name, name))
20487 {
20488 object_arch = &opt->value;
20489 *input_line_pointer = saved_char;
20490 demand_empty_rest_of_line ();
20491 return;
20492 }
20493
20494 as_bad (_("unknown architecture `%s'\n"), name);
20495 *input_line_pointer = saved_char;
20496 ignore_rest_of_line ();
20497 }
20498
20499
20500 /* Parse a .fpu directive. */
20501
20502 static void
20503 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20504 {
20505 const struct arm_option_cpu_value_table *opt;
20506 char saved_char;
20507 char *name;
20508
20509 name = input_line_pointer;
20510 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20511 input_line_pointer++;
20512 saved_char = *input_line_pointer;
20513 *input_line_pointer = 0;
20514
20515 for (opt = arm_fpus; opt->name != NULL; opt++)
20516 if (streq (opt->name, name))
20517 {
20518 mfpu_opt = &opt->value;
20519 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20520 *input_line_pointer = saved_char;
20521 demand_empty_rest_of_line ();
20522 return;
20523 }
20524
20525 as_bad (_("unknown floating point format `%s'\n"), name);
20526 *input_line_pointer = saved_char;
20527 ignore_rest_of_line ();
20528 }
20529
This page took 0.500245 seconds and 3 git commands to generate.