ARM/gas: Fix a build failure with GCC 4.3.3
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2014 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
/* This structure holds the unwinding state for the function currently
   being assembled (used when emitting ARM exception-table data; only
   built for ELF targets).  */

static struct
{
  /* Symbol marking the start of the function -- presumably set by the
     .fnstart directive; confirm against the directive handlers.  */
  symbolS * proc_start;
  /* Symbol for this function's unwind table entry, once created.  */
  symbolS * table_entry;
  /* Explicitly named personality routine, if any.  */
  symbolS * personality_routine;
  /* Index of a predefined personality routine.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,		/* Operand parsed successfully.  */
  PARSE_OPERAND_FAIL,			/* Operand failed to parse.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK	/* Operand failed to parse and, per
					   the name, the caller should not
					   backtrack and try an alternative
					   parse.  */
} parse_operand_result;
88
/* Floating-point ABI variants.  NOTE(review): presumably selected via a
   -mfloat-abi style option (cf. mfloat_abi_opt below) -- confirm against
   the option-parsing code.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
206
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
212
213 static const arm_feature_set arm_cext_iwmmxt2 =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218 ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230 ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
243 static const arm_feature_set crc_ext_armv8 =
244 ARM_FEATURE (0, CRC_EXT_ARMV8);
245
246 static int mfloat_abi_opt = -1;
247 /* Record user cpu selection for object attributes. */
248 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
249 /* Must be long enough to hold any of the names in arm_cpus. */
250 static char selected_cpu_name[16];
251
252 extern FLONUM_TYPE generic_floating_point_number;
253
254 /* Return if no cpu was selected on command-line. */
255 static bfd_boolean
256 no_cpu_selected (void)
257 {
258 return selected_cpu.core == arm_arch_none.core
259 && selected_cpu.coproc == arm_arch_none.coproc;
260 }
261
262 #ifdef OBJ_ELF
263 # ifdef EABI_DEFAULT
264 static int meabi_flags = EABI_DEFAULT;
265 # else
266 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
267 # endif
268
269 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
270
271 bfd_boolean
272 arm_is_eabi (void)
273 {
274 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
275 }
276 #endif
277
278 #ifdef OBJ_ELF
279 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
280 symbolS * GOT_symbol;
281 #endif
282
283 /* 0: assemble for ARM,
284 1: assemble for Thumb,
285 2: assemble for Thumb even though target CPU does not support thumb
286 instructions. */
287 static int thumb_mode = 0;
288 /* A value distinct from the possible values for thumb_mode that we
289 can use to record whether thumb_mode has been copied into the
290 tc_frag_data field of a frag. */
291 #define MODE_RECORDED (1 << 4)
292
293 /* Specifies the intrinsic IT insn behavior mode. */
294 enum implicit_it_mode
295 {
296 IMPLICIT_IT_MODE_NEVER = 0x00,
297 IMPLICIT_IT_MODE_ARM = 0x01,
298 IMPLICIT_IT_MODE_THUMB = 0x02,
299 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
300 };
301 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
302
303 /* If unified_syntax is true, we are processing the new unified
304 ARM/Thumb syntax. Important differences from the old ARM mode:
305
306 - Immediate operands do not require a # prefix.
307 - Conditional affixes always appear at the end of the
308 instruction. (For backward compatibility, those instructions
309 that formerly had them in the middle, continue to accept them
310 there.)
311 - The IT instruction may appear, and if it does is validated
312 against subsequent conditional affixes. It does not generate
313 machine code.
314
315 Important differences from the old Thumb mode:
316
317 - Immediate operands do not require a # prefix.
318 - Most of the V6T2 instructions are only available in unified mode.
319 - The .N and .W suffixes are recognized and honored (it is an error
320 if they cannot be honored).
321 - All instructions set the flags if and only if they have an 's' affix.
322 - Conditional affixes may be used. They are validated against
323 preceding IT instructions. Unlike ARM mode, you cannot use a
324 conditional affix except in the scope of an IT instruction. */
325
326 static bfd_boolean unified_syntax = FALSE;
327
328 /* An immediate operand can start with #, and ld*, st*, pld operands
329 can contain [ and ]. We need to tell APP not to elide whitespace
330 before a [, which can appear as the first operand for pld.
331 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
332 const char arm_symbol_chars[] = "#[]{}";
333
334 enum neon_el_type
335 {
336 NT_invtype,
337 NT_untyped,
338 NT_integer,
339 NT_float,
340 NT_poly,
341 NT_signed,
342 NT_unsigned
343 };
344
345 struct neon_type_el
346 {
347 enum neon_el_type type;
348 unsigned size;
349 };
350
351 #define NEON_MAX_TYPE_ELS 4
352
353 struct neon_type
354 {
355 struct neon_type_el el[NEON_MAX_TYPE_ELS];
356 unsigned elems;
357 };
358
359 enum it_instruction_type
360 {
361 OUTSIDE_IT_INSN,
362 INSIDE_IT_INSN,
363 INSIDE_IT_LAST_INSN,
364 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
365 if inside, should be the last one. */
366 NEUTRAL_IT_INSN, /* This could be either inside or outside,
367 i.e. BKPT and NOP. */
368 IT_INSN /* The IT insn has been parsed. */
369 };
370
371 /* The maximum number of operands we need. */
372 #define ARM_IT_MAX_OPERANDS 6
373
374 struct arm_it
375 {
376 const char * error;
377 unsigned long instruction;
378 int size;
379 int size_req;
380 int cond;
381 /* "uncond_value" is set to the value in place of the conditional field in
382 unconditional versions of the instruction, or -1 if nothing is
383 appropriate. */
384 int uncond_value;
385 struct neon_type vectype;
386 /* This does not indicate an actual NEON instruction, only that
387 the mnemonic accepts neon-style type suffixes. */
388 int is_neon;
389 /* Set to the opcode if the instruction needs relaxation.
390 Zero if the instruction is not relaxed. */
391 unsigned long relax;
392 struct
393 {
394 bfd_reloc_code_real_type type;
395 expressionS exp;
396 int pc_rel;
397 } reloc;
398
399 enum it_instruction_type it_insn_type;
400
401 struct
402 {
403 unsigned reg;
404 signed int imm;
405 struct neon_type_el vectype;
406 unsigned present : 1; /* Operand present. */
407 unsigned isreg : 1; /* Operand was a register. */
408 unsigned immisreg : 1; /* .imm field is a second register. */
409 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
410 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
411 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
412 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
413 instructions. This allows us to disambiguate ARM <-> vector insns. */
414 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
415 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
416 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
417 unsigned issingle : 1; /* Operand is VFP single-precision register. */
418 unsigned hasreloc : 1; /* Operand has relocation suffix. */
419 unsigned writeback : 1; /* Operand has trailing ! */
420 unsigned preind : 1; /* Preindexed address. */
421 unsigned postind : 1; /* Postindexed address. */
422 unsigned negative : 1; /* Index register was negated. */
423 unsigned shifted : 1; /* Shift applied to operation. */
424 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
425 } operands[ARM_IT_MAX_OPERANDS];
426 };
427
428 static struct arm_it inst;
429
430 #define NUM_FLOAT_VALS 8
431
432 const char * fp_const[] =
433 {
434 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
435 };
436
437 /* Number of littlenums required to hold an extended precision number. */
438 #define MAX_LITTLENUMS 6
439
440 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
441
442 #define FAIL (-1)
443 #define SUCCESS (0)
444
445 #define SUFF_S 1
446 #define SUFF_D 2
447 #define SUFF_E 3
448 #define SUFF_P 4
449
450 #define CP_T_X 0x00008000
451 #define CP_T_Y 0x00400000
452
453 #define CONDS_BIT 0x00100000
454 #define LOAD_BIT 0x00100000
455
456 #define DOUBLE_LOAD_FLAG 0x00000001
457
458 struct asm_cond
459 {
460 const char * template_name;
461 unsigned long value;
462 };
463
464 #define COND_ALWAYS 0xE
465
466 struct asm_psr
467 {
468 const char * template_name;
469 unsigned long field;
470 };
471
472 struct asm_barrier_opt
473 {
474 const char * template_name;
475 unsigned long value;
476 const arm_feature_set arch;
477 };
478
479 /* The bit that distinguishes CPSR and SPSR. */
480 #define SPSR_BIT (1 << 22)
481
482 /* The individual PSR flag bits. */
483 #define PSR_c (1 << 16)
484 #define PSR_x (1 << 17)
485 #define PSR_s (1 << 18)
486 #define PSR_f (1 << 19)
487
488 struct reloc_entry
489 {
490 char * name;
491 bfd_reloc_code_real_type reloc;
492 };
493
494 enum vfp_reg_pos
495 {
496 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
497 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
498 };
499
500 enum vfp_ldstm_type
501 {
502 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
503 };
504
505 /* Bits for DEFINED field in neon_typed_alias. */
506 #define NTA_HASTYPE 1
507 #define NTA_HASINDEX 2
508
509 struct neon_typed_alias
510 {
511 unsigned char defined;
512 unsigned char index;
513 struct neon_type_el eltype;
514 };
515
516 /* ARM register categories. This includes coprocessor numbers and various
517 architecture extensions' registers. */
518 enum arm_reg_type
519 {
520 REG_TYPE_RN,
521 REG_TYPE_CP,
522 REG_TYPE_CN,
523 REG_TYPE_FN,
524 REG_TYPE_VFS,
525 REG_TYPE_VFD,
526 REG_TYPE_NQ,
527 REG_TYPE_VFSD,
528 REG_TYPE_NDQ,
529 REG_TYPE_NSDQ,
530 REG_TYPE_VFC,
531 REG_TYPE_MVF,
532 REG_TYPE_MVD,
533 REG_TYPE_MVFX,
534 REG_TYPE_MVDX,
535 REG_TYPE_MVAX,
536 REG_TYPE_DSPSC,
537 REG_TYPE_MMXWR,
538 REG_TYPE_MMXWC,
539 REG_TYPE_MMXWCG,
540 REG_TYPE_XSCALE,
541 REG_TYPE_RNB
542 };
543
544 /* Structure for a hash table entry for a register.
545 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
546 information which states whether a vector type or index is specified (for a
547 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
548 struct reg_entry
549 {
550 const char * name;
551 unsigned int number;
552 unsigned char type;
553 unsigned char builtin;
554 struct neon_typed_alias * neon;
555 };
556
557 /* Diagnostics used when we don't get a register of the expected type. */
558 const char * const reg_expected_msgs[] =
559 {
560 N_("ARM register expected"),
561 N_("bad or missing co-processor number"),
562 N_("co-processor register expected"),
563 N_("FPA register expected"),
564 N_("VFP single precision register expected"),
565 N_("VFP/Neon double precision register expected"),
566 N_("Neon quad precision register expected"),
567 N_("VFP single or double precision register expected"),
568 N_("Neon double or quad precision register expected"),
569 N_("VFP single, double or Neon quad precision register expected"),
570 N_("VFP system register expected"),
571 N_("Maverick MVF register expected"),
572 N_("Maverick MVD register expected"),
573 N_("Maverick MVFX register expected"),
574 N_("Maverick MVDX register expected"),
575 N_("Maverick MVAX register expected"),
576 N_("Maverick DSPSC register expected"),
577 N_("iWMMXt data register expected"),
578 N_("iWMMXt control register expected"),
579 N_("iWMMXt scalar register expected"),
580 N_("XScale accumulator register expected"),
581 };
582
583 /* Some well known registers that we refer to directly elsewhere. */
584 #define REG_R12 12
585 #define REG_SP 13
586 #define REG_LR 14
587 #define REG_PC 15
588
589 /* ARM instructions take 4bytes in the object file, Thumb instructions
590 take 2: */
591 #define INSN_SIZE 4
592
593 struct asm_opcode
594 {
595 /* Basic string to match. */
596 const char * template_name;
597
598 /* Parameters to instruction. */
599 unsigned int operands[8];
600
601 /* Conditional tag - see opcode_lookup. */
602 unsigned int tag : 4;
603
604 /* Basic instruction code. */
605 unsigned int avalue : 28;
606
607 /* Thumb-format instruction code. */
608 unsigned int tvalue;
609
610 /* Which architecture variant provides this instruction. */
611 const arm_feature_set * avariant;
612 const arm_feature_set * tvariant;
613
614 /* Function to call to encode instruction in ARM format. */
615 void (* aencode) (void);
616
617 /* Function to call to encode instruction in Thumb format. */
618 void (* tencode) (void);
619 };
620
621 /* Defines for various bits that we will want to toggle. */
622 #define INST_IMMEDIATE 0x02000000
623 #define OFFSET_REG 0x02000000
624 #define HWOFFSET_IMM 0x00400000
625 #define SHIFT_BY_REG 0x00000010
626 #define PRE_INDEX 0x01000000
627 #define INDEX_UP 0x00800000
628 #define WRITE_BACK 0x00200000
629 #define LDM_TYPE_2_OR_3 0x00400000
630 #define CPSI_MMOD 0x00020000
631
632 #define LITERAL_MASK 0xf000f000
633 #define OPCODE_MASK 0xfe1fffff
634 #define V4_STR_BIT 0x00000020
635 #define VLDR_VMOV_SAME 0x0040f000
636
637 #define T2_SUBS_PC_LR 0xf3de8f00
638
639 #define DATA_OP_SHIFT 21
640
641 #define T2_OPCODE_MASK 0xfe1fffff
642 #define T2_DATA_OP_SHIFT 21
643
644 #define A_COND_MASK 0xf0000000
645 #define A_PUSH_POP_OP_MASK 0x0fff0000
646
647 /* Opcodes for pushing/poping registers to/from the stack. */
648 #define A1_OPCODE_PUSH 0x092d0000
649 #define A2_OPCODE_PUSH 0x052d0004
650 #define A2_OPCODE_POP 0x049d0004
651
652 /* Codes to distinguish the arithmetic instructions. */
653 #define OPCODE_AND 0
654 #define OPCODE_EOR 1
655 #define OPCODE_SUB 2
656 #define OPCODE_RSB 3
657 #define OPCODE_ADD 4
658 #define OPCODE_ADC 5
659 #define OPCODE_SBC 6
660 #define OPCODE_RSC 7
661 #define OPCODE_TST 8
662 #define OPCODE_TEQ 9
663 #define OPCODE_CMP 10
664 #define OPCODE_CMN 11
665 #define OPCODE_ORR 12
666 #define OPCODE_MOV 13
667 #define OPCODE_BIC 14
668 #define OPCODE_MVN 15
669
670 #define T2_OPCODE_AND 0
671 #define T2_OPCODE_BIC 1
672 #define T2_OPCODE_ORR 2
673 #define T2_OPCODE_ORN 3
674 #define T2_OPCODE_EOR 4
675 #define T2_OPCODE_ADD 8
676 #define T2_OPCODE_ADC 10
677 #define T2_OPCODE_SBC 11
678 #define T2_OPCODE_SUB 13
679 #define T2_OPCODE_RSB 14
680
681 #define T_OPCODE_MUL 0x4340
682 #define T_OPCODE_TST 0x4200
683 #define T_OPCODE_CMN 0x42c0
684 #define T_OPCODE_NEG 0x4240
685 #define T_OPCODE_MVN 0x43c0
686
687 #define T_OPCODE_ADD_R3 0x1800
688 #define T_OPCODE_SUB_R3 0x1a00
689 #define T_OPCODE_ADD_HI 0x4400
690 #define T_OPCODE_ADD_ST 0xb000
691 #define T_OPCODE_SUB_ST 0xb080
692 #define T_OPCODE_ADD_SP 0xa800
693 #define T_OPCODE_ADD_PC 0xa000
694 #define T_OPCODE_ADD_I8 0x3000
695 #define T_OPCODE_SUB_I8 0x3800
696 #define T_OPCODE_ADD_I3 0x1c00
697 #define T_OPCODE_SUB_I3 0x1e00
698
699 #define T_OPCODE_ASR_R 0x4100
700 #define T_OPCODE_LSL_R 0x4080
701 #define T_OPCODE_LSR_R 0x40c0
702 #define T_OPCODE_ROR_R 0x41c0
703 #define T_OPCODE_ASR_I 0x1000
704 #define T_OPCODE_LSL_I 0x0000
705 #define T_OPCODE_LSR_I 0x0800
706
707 #define T_OPCODE_MOV_I8 0x2000
708 #define T_OPCODE_CMP_I8 0x2800
709 #define T_OPCODE_CMP_LR 0x4280
710 #define T_OPCODE_MOV_HR 0x4600
711 #define T_OPCODE_CMP_HR 0x4500
712
713 #define T_OPCODE_LDR_PC 0x4800
714 #define T_OPCODE_LDR_SP 0x9800
715 #define T_OPCODE_STR_SP 0x9000
716 #define T_OPCODE_LDR_IW 0x6800
717 #define T_OPCODE_STR_IW 0x6000
718 #define T_OPCODE_LDR_IH 0x8800
719 #define T_OPCODE_STR_IH 0x8000
720 #define T_OPCODE_LDR_IB 0x7800
721 #define T_OPCODE_STR_IB 0x7000
722 #define T_OPCODE_LDR_RW 0x5800
723 #define T_OPCODE_STR_RW 0x5000
724 #define T_OPCODE_LDR_RH 0x5a00
725 #define T_OPCODE_STR_RH 0x5200
726 #define T_OPCODE_LDR_RB 0x5c00
727 #define T_OPCODE_STR_RB 0x5400
728
729 #define T_OPCODE_PUSH 0xb400
730 #define T_OPCODE_POP 0xbc00
731
732 #define T_OPCODE_BRANCH 0xe000
733
734 #define THUMB_SIZE 2 /* Size of thumb instruction. */
735 #define THUMB_PP_PC_LR 0x0100
736 #define THUMB_LOAD_BIT 0x0800
737 #define THUMB2_LOAD_BIT 0x00100000
738
/* Common diagnostic strings.  Each macro expands to an expression (a
   translated string), so none of them may carry a trailing semicolon.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fixed: this macro previously ended in a stray semicolon, which injected
   an extra `;' at every expansion and broke uses such as
   `else x = BAD_ADDR_MODE; else ...'.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE     _("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
760
761 static struct hash_control * arm_ops_hsh;
762 static struct hash_control * arm_cond_hsh;
763 static struct hash_control * arm_shift_hsh;
764 static struct hash_control * arm_psr_hsh;
765 static struct hash_control * arm_v7m_psr_hsh;
766 static struct hash_control * arm_reg_hsh;
767 static struct hash_control * arm_reloc_hsh;
768 static struct hash_control * arm_barrier_opt_hsh;
769
770 /* Stuff needed to resolve the label ambiguity
771 As:
772 ...
773 label: <insn>
774 may differ from:
775 ...
776 label:
777 <insn> */
778
779 symbolS * last_label_seen;
780 static int label_is_thumb_function_name = FALSE;
781
782 /* Literal pool structure. Held on a per-section
783 and per-sub-section basis. */
784
785 #define MAX_LITERAL_POOL_SIZE 1024
786 typedef struct literal_pool
787 {
788 expressionS literals [MAX_LITERAL_POOL_SIZE];
789 unsigned int next_free_entry;
790 unsigned int id;
791 symbolS * symbol;
792 segT section;
793 subsegT sub_section;
794 #ifdef OBJ_ELF
795 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
796 #endif
797 struct literal_pool * next;
798 unsigned int alignment;
799 } literal_pool;
800
801 /* Pointer to a linked list of literal pools. */
802 literal_pool * list_of_pools = NULL;
803
804 typedef enum asmfunc_states
805 {
806 OUTSIDE_ASMFUNC,
807 WAITING_ASMFUNC_NAME,
808 WAITING_ENDASMFUNC
809 } asmfunc_states;
810
811 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
812
813 #ifdef OBJ_ELF
814 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
815 #else
816 static struct current_it now_it;
817 #endif
818
819 static inline int
820 now_it_compatible (int cond)
821 {
822 return (cond & ~1) == (now_it.cc & ~1);
823 }
824
825 static inline int
826 conditional_insn (void)
827 {
828 return inst.cond != COND_ALWAYS;
829 }
830
831 static int in_it_block (void);
832
833 static int handle_it_state (void);
834
835 static void force_automatic_it_block_close (void);
836
837 static void it_fsm_post_encode (void);
838
839 #define set_it_insn_type(type) \
840 do \
841 { \
842 inst.it_insn_type = type; \
843 if (handle_it_state () == FAIL) \
844 return; \
845 } \
846 while (0)
847
848 #define set_it_insn_type_nonvoid(type, failret) \
849 do \
850 { \
851 inst.it_insn_type = type; \
852 if (handle_it_state () == FAIL) \
853 return failret; \
854 } \
855 while(0)
856
857 #define set_it_insn_type_last() \
858 do \
859 { \
860 if (inst.cond == COND_ALWAYS) \
861 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
862 else \
863 set_it_insn_type (INSIDE_IT_LAST_INSN); \
864 } \
865 while (0)
866
867 /* Pure syntax. */
868
869 /* This array holds the chars that always start a comment. If the
870 pre-processor is disabled, these aren't very useful. */
871 char arm_comment_chars[] = "@";
872
873 /* This array holds the chars that only start a comment at the beginning of
874 a line. If the line seems to have the form '# 123 filename'
875 .line and .file directives will appear in the pre-processed output. */
876 /* Note that input_file.c hand checks for '#' at the beginning of the
877 first line of the input file. This is because the compiler outputs
878 #NO_APP at the beginning of its output. */
879 /* Also note that comments like this one will always work. */
880 const char line_comment_chars[] = "#";
881
882 char arm_line_separator_chars[] = ";";
883
884 /* Chars that can be used to separate mant
885 from exp in floating point numbers. */
886 const char EXP_CHARS[] = "eE";
887
888 /* Chars that mean this number is a floating point constant. */
889 /* As in 0f12.456 */
890 /* or 0d1.2345e12 */
891
892 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
893
894 /* Prefix characters that indicate the start of an immediate
895 value. */
896 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
897
898 /* Separator character handling. */
899
900 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
901
902 static inline int
903 skip_past_char (char ** str, char c)
904 {
905 /* PR gas/14987: Allow for whitespace before the expected character. */
906 skip_whitespace (*str);
907
908 if (**str == c)
909 {
910 (*str)++;
911 return SUCCESS;
912 }
913 else
914 return FAIL;
915 }
916
917 #define skip_past_comma(str) skip_past_char (str, ',')
918
919 /* Arithmetic expressions (possibly involving symbols). */
920
921 /* Return TRUE if anything in the expression is a bignum. */
922
923 static int
924 walk_no_bignums (symbolS * sp)
925 {
926 if (symbol_get_value_expression (sp)->X_op == O_big)
927 return 1;
928
929 if (symbol_get_value_expression (sp)->X_add_symbol)
930 {
931 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
932 || (symbol_get_value_expression (sp)->X_op_symbol
933 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
934 }
935
936 return 0;
937 }
938
939 static int in_my_get_expression = 0;
940
941 /* Third argument to my_get_expression. */
942 #define GE_NO_PREFIX 0
943 #define GE_IMM_PREFIX 1
944 #define GE_OPT_PREFIX 2
945 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
946 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
947 #define GE_OPT_PREFIX_BIG 3
948
/* Parse an expression from *STR into EP, applying the immediate-prefix
   policy selected by PREFIX_MODE (one of the GE_* values above).  On
   success return 0 with *STR advanced past the expression; on failure
   return non-zero with inst.error set (unless already set).  Bignum
   results are rejected unless PREFIX_MODE is GE_OPT_PREFIX_BIG.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  /* Enforce (or skip) the '#'/'$' prefix according to PREFIX_MODE.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic gas expression parser, temporarily pointing
     input_line_pointer at the caller's string.  The in_my_get_expression
     flag lets md_operand () mark unparseable operands as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1034
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Number of 16-bit littlenums for each supported format letter.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big-endian target: emit littlenums in natural order.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure-little-endian FPU: emit littlenums in reverse order.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.
	   NOTE(review): with an odd PREC (the 'x'/'p' cases, prec == 5)
	   the last iteration reads words[prec] and emits one littlenum
	   beyond *SIZEP -- confirm this mixed-endian path is never hit
	   for those formats, or that callers size LITP accordingly.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1124
1125 /* We handle all bad expressions here, so that we can report the faulty
1126 instruction in the error message. */
1127 void
1128 md_operand (expressionS * exp)
1129 {
1130 if (in_my_get_expression)
1131 exp->X_op = O_illegal;
1132 }
1133
1134 /* Immediate values. */
1135
1136 /* Generic immediate-value read function for use in directives.
1137 Accepts anything that 'expression' can fold to a constant.
1138 *val receives the number. */
1139 #ifdef OBJ_ELF
1140 static int
1141 immediate_for_directive (int *val)
1142 {
1143 expressionS exp;
1144 exp.X_op = O_illegal;
1145
1146 if (is_immediate_prefix (*input_line_pointer))
1147 {
1148 input_line_pointer++;
1149 expression (&exp);
1150 }
1151
1152 if (exp.X_op != O_constant)
1153 {
1154 as_bad (_("expected #constant"));
1155 ignore_rest_of_line ();
1156 return FAIL;
1157 }
1158 *val = exp.X_add_number;
1159 return SUCCESS;
1160 }
1161 #endif
1162
1163 /* Register parsing. */
1164
1165 /* Generic register parser. CCP points to what should be the
1166 beginning of a register name. If it is indeed a valid register
1167 name, advance CCP over it and return the reg_entry structure;
1168 otherwise return NULL. Does not issue diagnostics. */
1169
1170 static struct reg_entry *
1171 arm_reg_parse_multi (char **ccp)
1172 {
1173 char *start = *ccp;
1174 char *p;
1175 struct reg_entry *reg;
1176
1177 skip_whitespace (start);
1178
1179 #ifdef REGISTER_PREFIX
1180 if (*start != REGISTER_PREFIX)
1181 return NULL;
1182 start++;
1183 #endif
1184 #ifdef OPTIONAL_REGISTER_PREFIX
1185 if (*start == OPTIONAL_REGISTER_PREFIX)
1186 start++;
1187 #endif
1188
1189 p = start;
1190 if (!ISALPHA (*p) || !is_name_beginner (*p))
1191 return NULL;
1192
1193 do
1194 p++;
1195 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1196
1197 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1198
1199 if (!reg)
1200 return NULL;
1201
1202 *ccp = p;
1203 return reg;
1204 }
1205
/* Try the alternative (non-canonical) syntaxes accepted for a few
   register classes.  START is the beginning of the operand text, CCP
   the parse cursor, REG the entry found by arm_reg_parse_multi (may be
   NULL) and TYPE the register class being parsed.  Return the register
   number, or FAIL if no alternative syntax applies.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  NOTE(review): when the bare number does not
	 parse (or exceeds 15) control drops into the REG_TYPE_MMXWC
	 case below; confirm this fall-through is intentional and not
	 a missing break.  Also note strtoul may have advanced *CCP
	 even when FAIL is ultimately returned.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1243
1244 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1245 return value is the register number or FAIL. */
1246
1247 static int
1248 arm_reg_parse (char **ccp, enum arm_reg_type type)
1249 {
1250 char *start = *ccp;
1251 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1252 int ret;
1253
1254 /* Do not allow a scalar (reg+index) to parse as a register. */
1255 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1256 return FAIL;
1257
1258 if (reg && reg->type == type)
1259 return reg->number;
1260
1261 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1262 return ret;
1263
1264 *ccp = start;
1265 return FAIL;
1266 }
1267
1268 /* Parse a Neon type specifier. *STR should point at the leading '.'
1269 character. Does no verification at this stage that the type fits the opcode
1270 properly. E.g.,
1271
1272 .i32.i32.s16
1273 .s32.f32
1274 .u16
1275
1276 Can all be legally parsed by this function.
1277
1278 Fills in neon_type struct pointer with parsed information, and updates STR
1279 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1280 type, FAIL if not. */
1281
1282 static int
1283 parse_neon_type (struct neon_type *type, char **str)
1284 {
1285 char *ptr = *str;
1286
1287 if (type)
1288 type->elems = 0;
1289
1290 while (type->elems < NEON_MAX_TYPE_ELS)
1291 {
1292 enum neon_el_type thistype = NT_untyped;
1293 unsigned thissize = -1u;
1294
1295 if (*ptr != '.')
1296 break;
1297
1298 ptr++;
1299
1300 /* Just a size without an explicit type. */
1301 if (ISDIGIT (*ptr))
1302 goto parsesize;
1303
1304 switch (TOLOWER (*ptr))
1305 {
1306 case 'i': thistype = NT_integer; break;
1307 case 'f': thistype = NT_float; break;
1308 case 'p': thistype = NT_poly; break;
1309 case 's': thistype = NT_signed; break;
1310 case 'u': thistype = NT_unsigned; break;
1311 case 'd':
1312 thistype = NT_float;
1313 thissize = 64;
1314 ptr++;
1315 goto done;
1316 default:
1317 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1318 return FAIL;
1319 }
1320
1321 ptr++;
1322
1323 /* .f is an abbreviation for .f32. */
1324 if (thistype == NT_float && !ISDIGIT (*ptr))
1325 thissize = 32;
1326 else
1327 {
1328 parsesize:
1329 thissize = strtoul (ptr, &ptr, 10);
1330
1331 if (thissize != 8 && thissize != 16 && thissize != 32
1332 && thissize != 64)
1333 {
1334 as_bad (_("bad size %d in type specifier"), thissize);
1335 return FAIL;
1336 }
1337 }
1338
1339 done:
1340 if (type)
1341 {
1342 type->el[type->elems].type = thistype;
1343 type->el[type->elems].size = thissize;
1344 type->elems++;
1345 }
1346 }
1347
1348 /* Empty/missing type is not a successful parse. */
1349 if (type->elems == 0)
1350 return FAIL;
1351
1352 *str = ptr;
1353
1354 return SUCCESS;
1355 }
1356
1357 /* Errors may be set multiple times during parsing or bit encoding
1358 (particularly in the Neon bits), but usually the earliest error which is set
1359 will be the most meaningful. Avoid overwriting it with later (cascading)
1360 errors by calling this function. */
1361
1362 static void
1363 first_error (const char *err)
1364 {
1365 if (!inst.error)
1366 inst.error = err;
1367 }
1368
1369 /* Parse a single type, e.g. ".s32", leading period included. */
1370 static int
1371 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1372 {
1373 char *str = *ccp;
1374 struct neon_type optype;
1375
1376 if (*str == '.')
1377 {
1378 if (parse_neon_type (&optype, &str) == SUCCESS)
1379 {
1380 if (optype.elems == 1)
1381 *vectype = optype.el[0];
1382 else
1383 {
1384 first_error (_("only one type should be specified for operand"));
1385 return FAIL;
1386 }
1387 }
1388 else
1389 {
1390 first_error (_("vector type expected"));
1391 return FAIL;
1392 }
1393 }
1394 else
1395 return FAIL;
1396
1397 *ccp = str;
1398
1399 return SUCCESS;
1400 }
1401
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

/* The operation applies to every lane of the register.  */
#define NEON_ALL_LANES 15
/* The registers in the list are interleaved structures, not lanes.  */
#define NEON_INTERLEAVE_LANES 14
1407
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL on error, leaving
   *CCP unchanged.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index description.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index attached to the register by a .dn/.qn alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix is allowed only when the alias does not
     already carry a type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Parse an optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1516
1517 /* Like arm_reg_parse, but allow allow the following extra features:
1518 - If RTYPE is non-zero, return the (possibly restricted) type of the
1519 register (e.g. Neon double or quad reg when either has been requested).
1520 - If this is a Neon vector type with additional type information, fill
1521 in the struct pointed to by VECTYPE (if non-NULL).
1522 This function will fault on encountering a scalar. */
1523
1524 static int
1525 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1526 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1527 {
1528 struct neon_typed_alias atype;
1529 char *str = *ccp;
1530 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1531
1532 if (reg == FAIL)
1533 return FAIL;
1534
1535 /* Do not allow regname(... to parse as a register. */
1536 if (*str == '(')
1537 return FAIL;
1538
1539 /* Do not allow a scalar (reg+index) to parse as a register. */
1540 if ((atype.defined & NTA_HASINDEX) != 0)
1541 {
1542 first_error (_("register operand expected, but got scalar"));
1543 return FAIL;
1544 }
1545
1546 if (vectype)
1547 *vectype = atype.eltype;
1548
1549 *ccp = str;
1550
1551 return reg;
1552 }
1553
/* A scalar is encoded as (register << 4) | lane-index; these macros
   recover the two halves.  */
#define NEON_SCALAR_REG(X) ((X) >> 4)
#define NEON_SCALAR_INDEX(X) ((X) & 15)
1556
1557 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1558 have enough information to be able to do a good job bounds-checking. So, we
1559 just do easy checks here, and do further checks later. */
1560
1561 static int
1562 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1563 {
1564 int reg;
1565 char *str = *ccp;
1566 struct neon_typed_alias atype;
1567
1568 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1569
1570 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1571 return FAIL;
1572
1573 if (atype.index == NEON_ALL_LANES)
1574 {
1575 first_error (_("scalar must have an index"));
1576 return FAIL;
1577 }
1578 else if (atype.index >= 64 / elsize)
1579 {
1580 first_error (_("scalar index out of range"));
1581 return FAIL;
1582 }
1583
1584 if (type)
1585 *type = atype.eltype;
1586
1587 *ccp = str;
1588
1589 return reg * 16 + atype.index;
1590 }
1591
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  Accepts a
   braced list of registers and ranges ("{r0, r2-r5}"), a bare constant
   mask, or a symbolic expression resolved later via a relocation; lists
   may be concatenated with '+' or '|'.  */

static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long range = 0;
  int another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      /* Close off an "rN-rM" range: set the bits strictly between
		 the endpoints, warning about duplicates.  */
	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* Continue on ',' separators; a '-' also continues but flags
	     that the next register closes a range.  Note STR is always
	     advanced past the candidate '-' character, hence the str--
	     below to step back once the loop exits.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      /* A bare constant is accepted as a 16-bit register mask.  */
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  regno &= -regno;
		  /* NOTE(review): this computes (1 << lowest-set-bit-VALUE)
		     - 1, which does not look like a register number; the
		     bit INDEX of the lowest duplicated bit was probably
		     intended.  Verify the warning reports the right rN.  */
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      /* A symbolic mask: record a relocation to be resolved later.
		 Only one such expression is allowed per instruction.  */
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1719
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers (extended syntax).  */
};
1728
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; S registers always have a limit of 32.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound covers both D halves.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range, rejecting overlaps.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this unconditionally steps over the terminating
     character on the assumption that it is '}'; there is no explicit
     check here.  Confirm malformed lists are diagnosed elsewhere.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1907
1908 /* True if two alias types are the same. */
1909
1910 static bfd_boolean
1911 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1912 {
1913 if (!a && !b)
1914 return TRUE;
1915
1916 if (!a || !b)
1917 return FALSE;
1918
1919 if (a->defined != b->defined)
1920 return FALSE;
1921
1922 if ((a->defined & NTA_HASTYPE) != 0
1923 && (a->eltype.type != b->eltype.type
1924 || a->eltype.size != b->eltype.size))
1925 return FALSE;
1926
1927 if ((a->defined & NTA_HASINDEX) != 0
1928 && (a->index != b->index))
1929 return FALSE;
1930
1931 return TRUE;
1932 }
1933
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X) ((X) & 0xf)
#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register fixes the base and, for Q regs, the stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register determines the stride, which must be 1 or 2.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* All list elements must carry identical type/index info.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Each indexed element must select the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2098
2099 /* Parse an explicit relocation suffix on an expression. This is
2100 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2101 arm_reloc_hsh contains no entries, so this function can only
2102 succeed if there is no () after the word. Returns -1 on error,
2103 BFD_RELOC_UNUSED if there wasn't any suffix. */
2104
2105 static int
2106 parse_reloc (char **str)
2107 {
2108 struct reloc_entry *r;
2109 char *p, *q;
2110
2111 if (**str != '(')
2112 return BFD_RELOC_UNUSED;
2113
2114 p = *str + 1;
2115 q = p;
2116
2117 while (*q && *q != ')' && *q != ',')
2118 q++;
2119 if (*q != ')')
2120 return -1;
2121
2122 if ((r = (struct reloc_entry *)
2123 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2124 return -1;
2125
2126 *str = q + 1;
2127 return r->reloc;
2128 }
2129
2130 /* Directives: register aliases. */
2131
2132 static struct reg_entry *
2133 insert_reg_alias (char *str, unsigned number, int type)
2134 {
2135 struct reg_entry *new_reg;
2136 const char *name;
2137
2138 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2139 {
2140 if (new_reg->builtin)
2141 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2142
2143 /* Only warn about a redefinition if it's not defined as the
2144 same register. */
2145 else if (new_reg->number != number || new_reg->type != type)
2146 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2147
2148 return NULL;
2149 }
2150
2151 name = xstrdup (str);
2152 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2153
2154 new_reg->name = name;
2155 new_reg->number = number;
2156 new_reg->type = type;
2157 new_reg->builtin = FALSE;
2158 new_reg->neon = NULL;
2159
2160 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2161 abort ();
2162
2163 return new_reg;
2164 }
2165
2166 static void
2167 insert_neon_reg_alias (char *str, int number, int type,
2168 struct neon_typed_alias *atype)
2169 {
2170 struct reg_entry *reg = insert_reg_alias (str, number, type);
2171
2172 if (!reg)
2173 {
2174 first_error (_("attempt to redefine typed alias"));
2175 return;
2176 }
2177
2178 if (atype)
2179 {
2180 reg->neon = (struct neon_typed_alias *)
2181 xmalloc (sizeof (struct neon_typed_alias));
2182 *reg->neon = *atype;
2183 }
2184 }
2185
2186 /* Look for the .req directive. This is of the form:
2187
2188 new_register_name .req existing_register_name
2189
2190 If we find one, or if it looks sufficiently like one that we want to
2191 handle any error here, return TRUE. Otherwise return FALSE. */
2192
2193 static bfd_boolean
2194 create_register_alias (char * newname, char *p)
2195 {
2196 struct reg_entry *old;
2197 char *oldname, *nbuf;
2198 size_t nlen;
2199
2200 /* The input scrubber ensures that whitespace after the mnemonic is
2201 collapsed to single spaces. */
2202 oldname = p;
2203 if (strncmp (oldname, " .req ", 6) != 0)
2204 return FALSE;
2205
2206 oldname += 6;
2207 if (*oldname == '\0')
2208 return FALSE;
2209
2210 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2211 if (!old)
2212 {
2213 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2214 return TRUE;
2215 }
2216
2217 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2218 the desired alias name, and p points to its end. If not, then
2219 the desired alias name is in the global original_case_string. */
2220 #ifdef TC_CASE_SENSITIVE
2221 nlen = p - newname;
2222 #else
2223 newname = original_case_string;
2224 nlen = strlen (newname);
2225 #endif
2226
2227 nbuf = (char *) alloca (nlen + 1);
2228 memcpy (nbuf, newname, nlen);
2229 nbuf[nlen] = '\0';
2230
2231 /* Create aliases under the new name as stated; an all-lowercase
2232 version of the new name; and an all-uppercase version of the new
2233 name. */
2234 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2235 {
2236 for (p = nbuf; *p; p++)
2237 *p = TOUPPER (*p);
2238
2239 if (strncmp (nbuf, newname, nlen))
2240 {
2241 /* If this attempt to create an additional alias fails, do not bother
2242 trying to create the all-lower case alias. We will fail and issue
2243 a second, duplicate error message. This situation arises when the
2244 programmer does something like:
2245 foo .req r0
2246 Foo .req r1
2247 The second .req creates the "Foo" alias but then fails to create
2248 the artificial FOO alias because it has already been created by the
2249 first .req. */
2250 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2251 return TRUE;
2252 }
2253
2254 for (p = nbuf; *p; p++)
2255 *p = TOLOWER (*p);
2256
2257 if (strncmp (nbuf, newname, nlen))
2258 insert_reg_alias (nbuf, old->number, old->type);
2259 }
2260
2261 return TRUE;
2262 }
2263
2264 /* Create a Neon typed/indexed register alias using directives, e.g.:
2265 X .dn d5.s32[1]
2266 Y .qn 6.s16
2267 Z .dn d7
2268 T .dn Z[0]
2269 These typed registers can be used instead of the types specified after the
2270 Neon mnemonic, so long as all operands given have types. Types can also be
2271 specified directly, e.g.:
2272 vadd d0.s32, d1.s32, d2.s32 */
2273
2274 static bfd_boolean
2275 create_neon_reg_alias (char *newname, char *p)
2276 {
2277 enum arm_reg_type basetype;
2278 struct reg_entry *basereg;
2279 struct reg_entry mybasereg;
2280 struct neon_type ntype;
2281 struct neon_typed_alias typeinfo;
2282 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2283 int namelen;
2284
2285 typeinfo.defined = 0;
2286 typeinfo.eltype.type = NT_invtype;
2287 typeinfo.eltype.size = -1;
2288 typeinfo.index = -1;
2289
2290 nameend = p;
2291
2292 if (strncmp (p, " .dn ", 5) == 0)
2293 basetype = REG_TYPE_VFD;
2294 else if (strncmp (p, " .qn ", 5) == 0)
2295 basetype = REG_TYPE_NQ;
2296 else
2297 return FALSE;
2298
2299 p += 5;
2300
2301 if (*p == '\0')
2302 return FALSE;
2303
2304 basereg = arm_reg_parse_multi (&p);
2305
2306 if (basereg && basereg->type != basetype)
2307 {
2308 as_bad (_("bad type for register"));
2309 return FALSE;
2310 }
2311
2312 if (basereg == NULL)
2313 {
2314 expressionS exp;
2315 /* Try parsing as an integer. */
2316 my_get_expression (&exp, &p, GE_NO_PREFIX);
2317 if (exp.X_op != O_constant)
2318 {
2319 as_bad (_("expression must be constant"));
2320 return FALSE;
2321 }
2322 basereg = &mybasereg;
2323 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2324 : exp.X_add_number;
2325 basereg->neon = 0;
2326 }
2327
2328 if (basereg->neon)
2329 typeinfo = *basereg->neon;
2330
2331 if (parse_neon_type (&ntype, &p) == SUCCESS)
2332 {
2333 /* We got a type. */
2334 if (typeinfo.defined & NTA_HASTYPE)
2335 {
2336 as_bad (_("can't redefine the type of a register alias"));
2337 return FALSE;
2338 }
2339
2340 typeinfo.defined |= NTA_HASTYPE;
2341 if (ntype.elems != 1)
2342 {
2343 as_bad (_("you must specify a single type only"));
2344 return FALSE;
2345 }
2346 typeinfo.eltype = ntype.el[0];
2347 }
2348
2349 if (skip_past_char (&p, '[') == SUCCESS)
2350 {
2351 expressionS exp;
2352 /* We got a scalar index. */
2353
2354 if (typeinfo.defined & NTA_HASINDEX)
2355 {
2356 as_bad (_("can't redefine the index of a scalar alias"));
2357 return FALSE;
2358 }
2359
2360 my_get_expression (&exp, &p, GE_NO_PREFIX);
2361
2362 if (exp.X_op != O_constant)
2363 {
2364 as_bad (_("scalar index must be constant"));
2365 return FALSE;
2366 }
2367
2368 typeinfo.defined |= NTA_HASINDEX;
2369 typeinfo.index = exp.X_add_number;
2370
2371 if (skip_past_char (&p, ']') == FAIL)
2372 {
2373 as_bad (_("expecting ]"));
2374 return FALSE;
2375 }
2376 }
2377
2378 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2379 the desired alias name, and p points to its end. If not, then
2380 the desired alias name is in the global original_case_string. */
2381 #ifdef TC_CASE_SENSITIVE
2382 namelen = nameend - newname;
2383 #else
2384 newname = original_case_string;
2385 namelen = strlen (newname);
2386 #endif
2387
2388 namebuf = (char *) alloca (namelen + 1);
2389 strncpy (namebuf, newname, namelen);
2390 namebuf[namelen] = '\0';
2391
2392 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2393 typeinfo.defined != 0 ? &typeinfo : NULL);
2394
2395 /* Insert name in all uppercase. */
2396 for (p = namebuf; *p; p++)
2397 *p = TOUPPER (*p);
2398
2399 if (strncmp (namebuf, newname, namelen))
2400 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2401 typeinfo.defined != 0 ? &typeinfo : NULL);
2402
2403 /* Insert name in all lowercase. */
2404 for (p = namebuf; *p; p++)
2405 *p = TOLOWER (*p);
2406
2407 if (strncmp (namebuf, newname, namelen))
2408 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2409 typeinfo.defined != 0 ? &typeinfo : NULL);
2410
2411 return TRUE;
2412 }
2413
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Registered as the
   handler for a line-initial ".req" so we can diagnose the misuse.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2422
/* As for s_req: .dn must follow the alias name, so a line-initial .dn
   is always a syntax error.  */
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2428
/* As for s_req: .qn must follow the alias name, so a line-initial .qn
   is always a syntax error.  */
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2434
2435 /* The .unreq directive deletes an alias which was previously defined
2436 by .req. For example:
2437
2438 my_alias .req r11
2439 .unreq my_alias */
2440
2441 static void
2442 s_unreq (int a ATTRIBUTE_UNUSED)
2443 {
2444 char * name;
2445 char saved_char;
2446
2447 name = input_line_pointer;
2448
2449 while (*input_line_pointer != 0
2450 && *input_line_pointer != ' '
2451 && *input_line_pointer != '\n')
2452 ++input_line_pointer;
2453
2454 saved_char = *input_line_pointer;
2455 *input_line_pointer = 0;
2456
2457 if (!*name)
2458 as_bad (_("invalid syntax for .unreq directive"));
2459 else
2460 {
2461 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2462 name);
2463
2464 if (!reg)
2465 as_bad (_("unknown register alias '%s'"), name);
2466 else if (reg->builtin)
2467 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2468 name);
2469 else
2470 {
2471 char * p;
2472 char * nbuf;
2473
2474 hash_delete (arm_reg_hsh, name, FALSE);
2475 free ((char *) reg->name);
2476 if (reg->neon)
2477 free (reg->neon);
2478 free (reg);
2479
2480 /* Also locate the all upper case and all lower case versions.
2481 Do not complain if we cannot find one or the other as it
2482 was probably deleted above. */
2483
2484 nbuf = strdup (name);
2485 for (p = nbuf; *p; p++)
2486 *p = TOUPPER (*p);
2487 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2488 if (reg)
2489 {
2490 hash_delete (arm_reg_hsh, nbuf, FALSE);
2491 free ((char *) reg->name);
2492 if (reg->neon)
2493 free (reg->neon);
2494 free (reg);
2495 }
2496
2497 for (p = nbuf; *p; p++)
2498 *p = TOLOWER (*p);
2499 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2500 if (reg)
2501 {
2502 hash_delete (arm_reg_hsh, nbuf, FALSE);
2503 free ((char *) reg->name);
2504 if (reg->neon)
2505 free (reg->neon);
2506 free (reg);
2507 }
2508
2509 free (nbuf);
2510 }
2511 }
2512
2513 *input_line_pointer = saved_char;
2514 demand_empty_rest_of_line ();
2515 }
2516
2517 /* Directives: Instruction set selection. */
2518
2519 #ifdef OBJ_ELF
2520 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2521 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2522 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2523 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2524
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG of the current section.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the name mandated by the ARM ELF spec: $a for ARM code,
     $t for Thumb code, $d for data.  All are untyped (see the note
     above this function's #ifdef block).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb-ness (and interworking support) on the
     symbol itself; data symbols carry neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: keep only the newest one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2600
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emits a $d symbol at offset VALUE within FRAG, and a STATE symbol
   BYTES further on, removing any mapping symbol already at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first map; clear
	     that cache too so make_mapping_symbol can repopulate it.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2627
2628 static void mapping_state_2 (enum mstate state, int max_chars);
2629
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC-relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to be marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	    the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  mapping_state_2 (state, 0);
#undef TRANSITION
}
2681
2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
2683 allocated. Put the mapping symbol that far back. */
2684
2685 static void
2686 mapping_state_2 (enum mstate state, int max_chars)
2687 {
2688 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2689
2690 if (!SEG_NORMAL (now_seg))
2691 return;
2692
2693 if (mapstate == state)
2694 /* The mapping symbol has already been emitted.
2695 There is nothing else to do. */
2696 return;
2697
2698 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2699 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2700 }
2701 #else
2702 #define mapping_state(x) ((void)0)
2703 #define mapping_state_2(x, y) ((void)0)
2704 #endif
2705
2706 /* Find the real, Thumb encoded start of a Thumb function. */
2707
2708 #ifdef OBJ_COFF
/* Given SYMBOLP, the target of a Thumb BL, return the symbol for the
   compiler-generated ".real_start_of" stub when one exists, otherwise
   SYMBOLP itself (with a warning).  */
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2741 #endif
2742
/* Switch between ARM (WIDTH == 32) and Thumb (WIDTH == 16) instruction
   encoding, updating the global thumb_mode and recording any alignment
   the new mode requires.  Diagnoses unsupported modes and widths.  */
static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  /* ARM instructions must be word-aligned; pad if needed.  */
	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
2780
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2787
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2794
2795 static void
2796 s_code (int unused ATTRIBUTE_UNUSED)
2797 {
2798 int temp;
2799
2800 temp = get_absolute_expression ();
2801 switch (temp)
2802 {
2803 case 16:
2804 case 32:
2805 opcode_select (temp);
2806 break;
2807
2808 default:
2809 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2810 }
2811 }
2812
/* Handle the .force_thumb directive: switch to Thumb mode without
   checking processor support (thumb_mode == 2 marks the forced state).  */
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.	*/
  if (! thumb_mode)
    {
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2829
/* Handle the .thumb_func directive: switch to Thumb mode and flag the
   next label as the entry point of a Thumb function.  */
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
2839
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  If EQUIV is non-zero behave like .equiv
   (error if the symbol is already defined) rather than .set.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name	    = input_line_pointer;
  delim	    = get_symbol_end ();
  end_name  = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* Temporarily re-terminate the name so the diagnostic prints it.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF  */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2929
2930 /* Directives: Mode selection. */
2931
2932 /* .syntax [unified|divided] - choose the new unified syntax
2933 (same for Arm and Thumb encoding, modulo slight differences in what
2934 can be represented) or the old divergent syntax for each mode. */
2935 static void
2936 s_syntax (int unused ATTRIBUTE_UNUSED)
2937 {
2938 char *name, delim;
2939
2940 name = input_line_pointer;
2941 delim = get_symbol_end ();
2942
2943 if (!strcasecmp (name, "unified"))
2944 unified_syntax = TRUE;
2945 else if (!strcasecmp (name, "divided"))
2946 unified_syntax = FALSE;
2947 else
2948 {
2949 as_bad (_("unrecognized syntax mode \"%s\""), name);
2950 return;
2951 }
2952 *input_line_pointer = delim;
2953 demand_empty_rest_of_line ();
2954 }
2955
2956 /* Directives: sectioning and alignment. */
2957
2958 /* Same as s_align_ptwo but align 0 => align 2. */
2959
2960 static void
2961 s_align (int unused ATTRIBUTE_UNUSED)
2962 {
2963 int temp;
2964 bfd_boolean fill_p;
2965 long temp_fill;
2966 long max_alignment = 15;
2967
2968 temp = get_absolute_expression ();
2969 if (temp > max_alignment)
2970 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2971 else if (temp < 0)
2972 {
2973 as_bad (_("alignment negative. 0 assumed."));
2974 temp = 0;
2975 }
2976
2977 if (*input_line_pointer == ',')
2978 {
2979 input_line_pointer++;
2980 temp_fill = get_absolute_expression ();
2981 fill_p = TRUE;
2982 }
2983 else
2984 {
2985 fill_p = FALSE;
2986 temp_fill = 0;
2987 }
2988
2989 if (!temp)
2990 temp = 2;
2991
2992 /* Only make a frag if we HAVE to. */
2993 if (temp && !need_pass_2)
2994 {
2995 if (!fill_p && subseg_text_p (now_seg))
2996 frag_align_code (temp, 0);
2997 else
2998 frag_align (temp, (int) temp_fill, 0);
2999 }
3000 demand_empty_rest_of_line ();
3001
3002 record_alignment (now_seg, temp);
3003 }
3004
/* Handle the .bss directive: switch to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3017
/* Handle the .even directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3029
3030 /* Directives: CodeComposer Studio. */
3031
/* .ref (for CodeComposer Studio syntax only).  Accepted and ignored
   under -mccs; an error otherwise.  */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3041
3042 /* If name is not NULL, then it is used for marking the beginning of a
3043 function, wherease if it is NULL then it means the function end. */
3044 static void
3045 asmfunc_debug (const char * name)
3046 {
3047 static const char * last_name = NULL;
3048
3049 if (name != NULL)
3050 {
3051 gas_assert (last_name == NULL);
3052 last_name = name;
3053
3054 if (debug_type == DEBUG_STABS)
3055 stabs_generate_asm_func (name, name);
3056 }
3057 else
3058 {
3059 gas_assert (last_name != NULL);
3060
3061 if (debug_type == DEBUG_STABS)
3062 stabs_generate_asm_endfunc (last_name, last_name);
3063
3064 last_name = NULL;
3065 }
3066 }
3067
/* Handle the CodeComposer Studio .asmfunc directive: start waiting for
   the label that names the function.  Only valid with -mccs.  */
static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3092
/* Handle the CodeComposer Studio .endasmfunc directive: close the
   function opened by .asmfunc.  Only valid with -mccs.  */
static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3118
/* Handle the CodeComposer Studio .def directive: treated as .global.
   Only valid with -mccs.  */
static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3127
3128 /* Directives: Literal pools. */
3129
3130 static literal_pool *
3131 find_literal_pool (void)
3132 {
3133 literal_pool * pool;
3134
3135 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3136 {
3137 if (pool->section == now_seg
3138 && pool->sub_section == now_subseg)
3139 break;
3140 }
3141
3142 return pool;
3143 }
3144
/* Return the literal pool for the current section/subsection, creating
   it (and its anchor symbol) on first use.  */
static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = (literal_pool *) xmalloc (sizeof (* pool));
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;
      pool->alignment	    = 2;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
3183
3184 /* Add the literal in the global 'inst'
3185 structure to the relevant literal pool. */
3186
3187 static int
3188 add_to_lit_pool (unsigned int nbytes)
3189 {
3190 #define PADDING_SLOT 0x1
3191 #define LIT_ENTRY_SIZE_MASK 0xFF
3192 literal_pool * pool;
3193 unsigned int entry, pool_size = 0;
3194 bfd_boolean padding_slot_p = FALSE;
3195 unsigned imm1 = 0;
3196 unsigned imm2 = 0;
3197
3198 if (nbytes == 8)
3199 {
3200 imm1 = inst.operands[1].imm;
3201 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3202 : inst.reloc.exp.X_unsigned ? 0
3203 : ((int64_t) inst.operands[1].imm) >> 32);
3204 if (target_big_endian)
3205 {
3206 imm1 = imm2;
3207 imm2 = inst.operands[1].imm;
3208 }
3209 }
3210
3211 pool = find_or_make_literal_pool ();
3212
3213 /* Check if this literal value is already in the pool. */
3214 for (entry = 0; entry < pool->next_free_entry; entry ++)
3215 {
3216 if (nbytes == 4)
3217 {
3218 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3219 && (inst.reloc.exp.X_op == O_constant)
3220 && (pool->literals[entry].X_add_number
3221 == inst.reloc.exp.X_add_number)
3222 && (pool->literals[entry].X_md == nbytes)
3223 && (pool->literals[entry].X_unsigned
3224 == inst.reloc.exp.X_unsigned))
3225 break;
3226
3227 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3228 && (inst.reloc.exp.X_op == O_symbol)
3229 && (pool->literals[entry].X_add_number
3230 == inst.reloc.exp.X_add_number)
3231 && (pool->literals[entry].X_add_symbol
3232 == inst.reloc.exp.X_add_symbol)
3233 && (pool->literals[entry].X_op_symbol
3234 == inst.reloc.exp.X_op_symbol)
3235 && (pool->literals[entry].X_md == nbytes))
3236 break;
3237 }
3238 else if ((nbytes == 8)
3239 && !(pool_size & 0x7)
3240 && ((entry + 1) != pool->next_free_entry)
3241 && (pool->literals[entry].X_op == O_constant)
3242 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3243 && (pool->literals[entry].X_unsigned
3244 == inst.reloc.exp.X_unsigned)
3245 && (pool->literals[entry + 1].X_op == O_constant)
3246 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3247 && (pool->literals[entry + 1].X_unsigned
3248 == inst.reloc.exp.X_unsigned))
3249 break;
3250
3251 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3252 if (padding_slot_p && (nbytes == 4))
3253 break;
3254
3255 pool_size += 4;
3256 }
3257
3258 /* Do we need to create a new entry? */
3259 if (entry == pool->next_free_entry)
3260 {
3261 if (entry >= MAX_LITERAL_POOL_SIZE)
3262 {
3263 inst.error = _("literal pool overflow");
3264 return FAIL;
3265 }
3266
3267 if (nbytes == 8)
3268 {
3269 /* For 8-byte entries, we align to an 8-byte boundary,
3270 and split it into two 4-byte entries, because on 32-bit
3271 host, 8-byte constants are treated as big num, thus
3272 saved in "generic_bignum" which will be overwritten
3273 by later assignments.
3274
3275 We also need to make sure there is enough space for
3276 the split.
3277
3278 We also check to make sure the literal operand is a
3279 constant number. */
3280 if (!(inst.reloc.exp.X_op == O_constant
3281 || inst.reloc.exp.X_op == O_big))
3282 {
3283 inst.error = _("invalid type for literal pool");
3284 return FAIL;
3285 }
3286 else if (pool_size & 0x7)
3287 {
3288 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3289 {
3290 inst.error = _("literal pool overflow");
3291 return FAIL;
3292 }
3293
3294 pool->literals[entry] = inst.reloc.exp;
3295 pool->literals[entry].X_add_number = 0;
3296 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3297 pool->next_free_entry += 1;
3298 pool_size += 4;
3299 }
3300 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3301 {
3302 inst.error = _("literal pool overflow");
3303 return FAIL;
3304 }
3305
3306 pool->literals[entry] = inst.reloc.exp;
3307 pool->literals[entry].X_op = O_constant;
3308 pool->literals[entry].X_add_number = imm1;
3309 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3310 pool->literals[entry++].X_md = 4;
3311 pool->literals[entry] = inst.reloc.exp;
3312 pool->literals[entry].X_op = O_constant;
3313 pool->literals[entry].X_add_number = imm2;
3314 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3315 pool->literals[entry].X_md = 4;
3316 pool->alignment = 3;
3317 pool->next_free_entry += 1;
3318 }
3319 else
3320 {
3321 pool->literals[entry] = inst.reloc.exp;
3322 pool->literals[entry].X_md = 4;
3323 }
3324
3325 #ifdef OBJ_ELF
3326 /* PR ld/12974: Record the location of the first source line to reference
3327 this entry in the literal pool. If it turns out during linking that the
3328 symbol does not exist we will be able to give an accurate line number for
3329 the (first use of the) missing reference. */
3330 if (debug_type == DEBUG_DWARF2)
3331 dwarf2_where (pool->locs + entry);
3332 #endif
3333 pool->next_free_entry += 1;
3334 }
3335 else if (padding_slot_p)
3336 {
3337 pool->literals[entry] = inst.reloc.exp;
3338 pool->literals[entry].X_md = nbytes;
3339 }
3340
3341 inst.reloc.exp.X_op = O_symbol;
3342 inst.reloc.exp.X_add_number = pool_size;
3343 inst.reloc.exp.X_add_symbol = pool->symbol;
3344
3345 return SUCCESS;
3346 }
3347
/* Hook called for a label that is not followed by a colon.  Under
   CodeComposer syntax, a label seen while waiting for a .asmfunc name
   becomes the function name; returns FALSE (and errors) for labels
   starting with '.', TRUE otherwise.  */
bfd_boolean
tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      /* REST points just past the label; back up to its first
	 character on this line.  */
      const char *label = rest;

      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3373
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in SYMBOLP (already allocated) with NAME, SEGMENT, VALU and
   FRAG, then append it to the global symbol chain.  NAME is copied
   onto the notes obstack, so the caller may reuse its buffer.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3424
/* Implement the .ltorg/.pool directives: dump the current literal pool,
   if any, at the present location and mark it empty again.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated pool label out of the user's
     symbol namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the pool symbol at the pool's final location; loads that
     referenced the pool resolve against this symbol.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3476
3477 #ifdef OBJ_ELF
3478 /* Forward declarations for functions below, in the MD interface
3479 section. */
3480 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3481 static valueT create_unwind_entry (int);
3482 static void start_unwind_section (const segT, int);
3483 static void add_unwind_opcode (valueT, int);
3484 static void flush_pending_unwind (void);
3485
3486 /* Directives: Data. */
3487
/* Implement .word/.long with optional relocation suffixes such as
   sym(GOT): emit NBYTES of data per comma-separated operand, attaching
   the requested relocation where a suffix was given.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Literal data follows: record the mapping-symbol state change.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  /* NOTE(review): alloca of a user-controlled length; the
		     span covers at most one source statement, so it is
		     small in practice — confirm before reworking.  */
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  /* Save the statement text, splice the suffix out of the
		     input line, reparse the remainder of the expression,
		     then restore the original text.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Right-align the relocated field within the NBYTES
		     of emitted (zeroed) data.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3578
3579 /* Emit an expression containing a 32-bit thumb instruction.
3580 Implementation based on put_thumb32_insn. */
3581
3582 static void
3583 emit_thumb32_expr (expressionS * exp)
3584 {
3585 expressionS exp_high = *exp;
3586
3587 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3588 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3589 exp->X_add_number &= 0xffff;
3590 emit_expr (exp, (unsigned int) THUMB_SIZE);
3591 }
3592
3593 /* Guess the instruction size based on the opcode. */
3594
static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  /* Encodings at or above 0xe8000000 are unambiguously 32-bit;
     those below 0xe800 are unambiguously 16-bit.  Anything in
     between cannot be classified without an explicit width.  */
  if (value >= 0xe8000000u)
    return 4;
  return value < 0xe800u ? 2 : 0;
}
3605
3606 static bfd_boolean
3607 emit_insn (expressionS *exp, int nbytes)
3608 {
3609 int size = 0;
3610
3611 if (exp->X_op == O_constant)
3612 {
3613 size = nbytes;
3614
3615 if (size == 0)
3616 size = thumb_insn_size (exp->X_add_number);
3617
3618 if (size != 0)
3619 {
3620 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3621 {
3622 as_bad (_(".inst.n operand too big. "\
3623 "Use .inst.w instead"));
3624 size = 0;
3625 }
3626 else
3627 {
3628 if (now_it.state == AUTOMATIC_IT_BLOCK)
3629 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3630 else
3631 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3632
3633 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3634 emit_thumb32_expr (exp);
3635 else
3636 emit_expr (exp, (unsigned int) size);
3637
3638 it_fsm_post_encode ();
3639 }
3640 }
3641 else
3642 as_bad (_("cannot determine Thumb instruction size. " \
3643 "Use .inst.n/.inst.w instead"));
3644 }
3645 else
3646 as_bad (_("constant expression required"));
3647
3648 return (size != 0);
3649 }
3650
3651 /* Like s_arm_elf_cons but do not use md_cons_align and
3652 set the mapping state to MAP_ARM/MAP_THUMB. */
3653
static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* Width suffixes (.inst.n/.inst.w) make no sense in ARM mode:
	 every instruction is 4 bytes.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3700
3701 /* Parse a .rel31 directive. */
3702
static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* Syntax: .rel31 <0|1>, <expression>.  The first operand supplies
     bit 31 of the emitted word; the low 31 bits are filled in via a
     BFD_RELOC_ARM_PREL31 relocation against the expression.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the word with only the high bit set; the relocation (pc_rel
     == 1, i.e. self-relative) supplies the rest.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3740
3741 /* Directives: AEABI stack-unwind tables. */
3742
3743 /* Parse an unwind_fnstart directive. Simply records the current location. */
3744
3745 static void
3746 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3747 {
3748 demand_empty_rest_of_line ();
3749 if (unwind.proc_start)
3750 {
3751 as_bad (_("duplicate .fnstart directive"));
3752 return;
3753 }
3754
3755 /* Mark the start of the function. */
3756 unwind.proc_start = expr_build_dot ();
3757
3758 /* Reset the rest of the unwind info. */
3759 unwind.opcode_count = 0;
3760 unwind.table_entry = NULL;
3761 unwind.personality_routine = NULL;
3762 unwind.personality_index = -1;
3763 unwind.frame_size = 0;
3764 unwind.fp_offset = 0;
3765 unwind.fp_reg = REG_SP;
3766 unwind.fp_used = 0;
3767 unwind.sp_restored = 0;
3768 }
3769
3770
3771 /* Parse a handlerdata directive. Creates the exception handling table entry
3772 for the function. */
3773
3774 static void
3775 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3776 {
3777 demand_empty_rest_of_line ();
3778 if (!unwind.proc_start)
3779 as_bad (MISSING_FNSTART);
3780
3781 if (unwind.table_entry)
3782 as_bad (_("duplicate .handlerdata directive"));
3783
3784 create_unwind_entry (1);
3785 }
3786
3787 /* Parse an unwind_fnend directive. Generates the index table entry. */
3788
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when the entry is short
     enough to be stored inline in the index table word.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  /* Reserve and zero both words; WHERE is the offset of the first.  */
  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized BFD_RELOC_NONE fix records the reference for the
	 linker without patching any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3856
3857
3858 /* Parse an unwind_cantunwind directive. */
3859
3860 static void
3861 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3862 {
3863 demand_empty_rest_of_line ();
3864 if (!unwind.proc_start)
3865 as_bad (MISSING_FNSTART);
3866
3867 if (unwind.personality_routine || unwind.personality_index != -1)
3868 as_bad (_("personality routine specified for cantunwind frame"));
3869
3870 unwind.personality_index = -2;
3871 }
3872
3873
3874 /* Parse a personalityindex directive. */
3875
3876 static void
3877 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3878 {
3879 expressionS exp;
3880
3881 if (!unwind.proc_start)
3882 as_bad (MISSING_FNSTART);
3883
3884 if (unwind.personality_routine || unwind.personality_index != -1)
3885 as_bad (_("duplicate .personalityindex directive"));
3886
3887 expression (&exp);
3888
3889 if (exp.X_op != O_constant
3890 || exp.X_add_number < 0 || exp.X_add_number > 15)
3891 {
3892 as_bad (_("bad personality routine number"));
3893 ignore_rest_of_line ();
3894 return;
3895 }
3896
3897 unwind.personality_index = exp.X_add_number;
3898
3899 demand_empty_rest_of_line ();
3900 }
3901
3902
3903 /* Parse a personality directive. */
3904
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_end NUL-terminates the symbol name in place and returns
     the character it overwrote; restore that character afterwards.  */
  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3923
3924
3925 /* Parse a directive saving core registers. */
3926
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask of the listed core registers: bit N set means
     rN was saved.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode emitted earlier and rewrite ip -> sp
	 in the mask.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 Each core register is 4.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4000
4001
4002 /* Parse a directive saving FPA registers. */
4003
4004 static void
4005 s_arm_unwind_save_fpa (int reg)
4006 {
4007 expressionS exp;
4008 int num_regs;
4009 valueT op;
4010
4011 /* Get Number of registers to transfer. */
4012 if (skip_past_comma (&input_line_pointer) != FAIL)
4013 expression (&exp);
4014 else
4015 exp.X_op = O_illegal;
4016
4017 if (exp.X_op != O_constant)
4018 {
4019 as_bad (_("expected , <constant>"));
4020 ignore_rest_of_line ();
4021 return;
4022 }
4023
4024 num_regs = exp.X_add_number;
4025
4026 if (num_regs < 1 || num_regs > 4)
4027 {
4028 as_bad (_("number of registers must be in the range [1:4]"));
4029 ignore_rest_of_line ();
4030 return;
4031 }
4032
4033 demand_empty_rest_of_line ();
4034
4035 if (reg == 4)
4036 {
4037 /* Short form. */
4038 op = 0xb4 | (num_regs - 1);
4039 add_unwind_opcode (op, 1);
4040 }
4041 else
4042 {
4043 /* Long form. */
4044 op = 0xc800 | (reg << 4) | (num_regs - 1);
4045 add_unwind_opcode (op, 2);
4046 }
4047 unwind.frame_size += num_regs * 12;
4048 }
4049
4050
4051 /* Parse a directive saving VFP registers for ARMv6 and above. */
4052
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  /* COUNT consecutive D registers starting at dSTART.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the start register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4099
4100
4101 /* Parse a directive saving VFP registers for pre-ARMv6. */
4102
4103 static void
4104 s_arm_unwind_save_vfp (void)
4105 {
4106 int count;
4107 unsigned int reg;
4108 valueT op;
4109
4110 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4111 if (count == FAIL)
4112 {
4113 as_bad (_("expected register list"));
4114 ignore_rest_of_line ();
4115 return;
4116 }
4117
4118 demand_empty_rest_of_line ();
4119
4120 if (reg == 8)
4121 {
4122 /* Short form. */
4123 op = 0xb8 | (count - 1);
4124 add_unwind_opcode (op, 1);
4125 }
4126 else
4127 {
4128 /* Long form. */
4129 op = 0xb300 | (reg << 4) | (count - 1);
4130 add_unwind_opcode (op, 2);
4131 }
4132 unwind.frame_size += count * 8 + 4;
4133 }
4134
4135
4136 /* Parse a directive saving iWMMXt data registers. */
4137
4138 static void
4139 s_arm_unwind_save_mmxwr (void)
4140 {
4141 int reg;
4142 int hi_reg;
4143 int i;
4144 unsigned mask = 0;
4145 valueT op;
4146
4147 if (*input_line_pointer == '{')
4148 input_line_pointer++;
4149
4150 do
4151 {
4152 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4153
4154 if (reg == FAIL)
4155 {
4156 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4157 goto error;
4158 }
4159
4160 if (mask >> reg)
4161 as_tsktsk (_("register list not in ascending order"));
4162 mask |= 1 << reg;
4163
4164 if (*input_line_pointer == '-')
4165 {
4166 input_line_pointer++;
4167 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4168 if (hi_reg == FAIL)
4169 {
4170 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4171 goto error;
4172 }
4173 else if (reg >= hi_reg)
4174 {
4175 as_bad (_("bad register range"));
4176 goto error;
4177 }
4178 for (; reg < hi_reg; reg++)
4179 mask |= 1 << reg;
4180 }
4181 }
4182 while (skip_past_comma (&input_line_pointer) != FAIL);
4183
4184 skip_past_char (&input_line_pointer, '}');
4185
4186 demand_empty_rest_of_line ();
4187
4188 /* Generate any deferred opcodes because we're going to be looking at
4189 the list. */
4190 flush_pending_unwind ();
4191
4192 for (i = 0; i < 16; i++)
4193 {
4194 if (mask & (1 << i))
4195 unwind.frame_size += 8;
4196 }
4197
4198 /* Attempt to combine with a previous opcode. We do this because gcc
4199 likes to output separate unwind directives for a single block of
4200 registers. */
4201 if (unwind.opcode_count > 0)
4202 {
4203 i = unwind.opcodes[unwind.opcode_count - 1];
4204 if ((i & 0xf8) == 0xc0)
4205 {
4206 i &= 7;
4207 /* Only merge if the blocks are contiguous. */
4208 if (i < 6)
4209 {
4210 if ((mask & 0xfe00) == (1 << 9))
4211 {
4212 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4213 unwind.opcode_count--;
4214 }
4215 }
4216 else if (i == 6 && unwind.opcode_count >= 2)
4217 {
4218 i = unwind.opcodes[unwind.opcode_count - 2];
4219 reg = i >> 4;
4220 i &= 0xf;
4221
4222 op = 0xffff << (reg - 1);
4223 if (reg > 0
4224 && ((mask & op) == (1u << (reg - 1))))
4225 {
4226 op = (1 << (reg + i + 1)) - 1;
4227 op &= ~((1 << reg) - 1);
4228 mask |= op;
4229 unwind.opcode_count -= 2;
4230 }
4231 }
4232 }
4233 }
4234
4235 hi_reg = 15;
4236 /* We want to generate opcodes in the order the registers have been
4237 saved, ie. descending order. */
4238 for (reg = 15; reg >= -1; reg--)
4239 {
4240 /* Save registers in blocks. */
4241 if (reg < 0
4242 || !(mask & (1 << reg)))
4243 {
4244 /* We found an unsaved reg. Generate opcodes to save the
4245 preceding block. */
4246 if (reg != hi_reg)
4247 {
4248 if (reg == 9)
4249 {
4250 /* Short form. */
4251 op = 0xc0 | (hi_reg - 10);
4252 add_unwind_opcode (op, 1);
4253 }
4254 else
4255 {
4256 /* Long form. */
4257 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4258 add_unwind_opcode (op, 2);
4259 }
4260 }
4261 hi_reg = reg - 1;
4262 }
4263 }
4264
4265 return;
4266 error:
4267 ignore_rest_of_line ();
4268 }
4269
4270 static void
4271 s_arm_unwind_save_mmxwcg (void)
4272 {
4273 int reg;
4274 int hi_reg;
4275 unsigned mask = 0;
4276 valueT op;
4277
4278 if (*input_line_pointer == '{')
4279 input_line_pointer++;
4280
4281 skip_whitespace (input_line_pointer);
4282
4283 do
4284 {
4285 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4286
4287 if (reg == FAIL)
4288 {
4289 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4290 goto error;
4291 }
4292
4293 reg -= 8;
4294 if (mask >> reg)
4295 as_tsktsk (_("register list not in ascending order"));
4296 mask |= 1 << reg;
4297
4298 if (*input_line_pointer == '-')
4299 {
4300 input_line_pointer++;
4301 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4302 if (hi_reg == FAIL)
4303 {
4304 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4305 goto error;
4306 }
4307 else if (reg >= hi_reg)
4308 {
4309 as_bad (_("bad register range"));
4310 goto error;
4311 }
4312 for (; reg < hi_reg; reg++)
4313 mask |= 1 << reg;
4314 }
4315 }
4316 while (skip_past_comma (&input_line_pointer) != FAIL);
4317
4318 skip_past_char (&input_line_pointer, '}');
4319
4320 demand_empty_rest_of_line ();
4321
4322 /* Generate any deferred opcodes because we're going to be looking at
4323 the list. */
4324 flush_pending_unwind ();
4325
4326 for (reg = 0; reg < 16; reg++)
4327 {
4328 if (mask & (1 << reg))
4329 unwind.frame_size += 4;
4330 }
4331 op = 0xc700 | mask;
4332 add_unwind_opcode (op, 2);
4333 return;
4334 error:
4335 ignore_rest_of_line ();
4336 }
4337
4338
4339 /* Parse an unwind_save directive.
4340 If the argument is non-zero, this is a .vsave directive. */
4341
4342 static void
4343 s_arm_unwind_save (int arch_v6)
4344 {
4345 char *peek;
4346 struct reg_entry *reg;
4347 bfd_boolean had_brace = FALSE;
4348
4349 if (!unwind.proc_start)
4350 as_bad (MISSING_FNSTART);
4351
4352 /* Figure out what sort of save we have. */
4353 peek = input_line_pointer;
4354
4355 if (*peek == '{')
4356 {
4357 had_brace = TRUE;
4358 peek++;
4359 }
4360
4361 reg = arm_reg_parse_multi (&peek);
4362
4363 if (!reg)
4364 {
4365 as_bad (_("register expected"));
4366 ignore_rest_of_line ();
4367 return;
4368 }
4369
4370 switch (reg->type)
4371 {
4372 case REG_TYPE_FN:
4373 if (had_brace)
4374 {
4375 as_bad (_("FPA .unwind_save does not take a register list"));
4376 ignore_rest_of_line ();
4377 return;
4378 }
4379 input_line_pointer = peek;
4380 s_arm_unwind_save_fpa (reg->number);
4381 return;
4382
4383 case REG_TYPE_RN:
4384 s_arm_unwind_save_core ();
4385 return;
4386
4387 case REG_TYPE_VFD:
4388 if (arch_v6)
4389 s_arm_unwind_save_vfp_armv6 ();
4390 else
4391 s_arm_unwind_save_vfp ();
4392 return;
4393
4394 case REG_TYPE_MMXWR:
4395 s_arm_unwind_save_mmxwr ();
4396 return;
4397
4398 case REG_TYPE_MMXWCG:
4399 s_arm_unwind_save_mmxwcg ();
4400 return;
4401
4402 default:
4403 as_bad (_(".unwind_save does not support this kind of register"));
4404 ignore_rest_of_line ();
4405 }
4406 }
4407
4408
4409 /* Parse an unwind_movsp directive. */
4410
static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A second movsp without an intervening setfp makes no sense.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.	sp_restored enables the
     movsp-ip + save-ip merge in s_arm_unwind_save_core.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4458
4459 /* Parse an unwind_pad directive. */
4460
4461 static void
4462 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4463 {
4464 int offset;
4465
4466 if (!unwind.proc_start)
4467 as_bad (MISSING_FNSTART);
4468
4469 if (immediate_for_directive (&offset) == FAIL)
4470 return;
4471
4472 if (offset & 3)
4473 {
4474 as_bad (_("stack increment must be multiple of 4"));
4475 ignore_rest_of_line ();
4476 return;
4477 }
4478
4479 /* Don't generate any opcodes, just record the details for later. */
4480 unwind.frame_size += offset;
4481 unwind.pending_offset += offset;
4482
4483 demand_empty_rest_of_line ();
4484 }
4485
4486 /* Parse an unwind_setfp directive. */
4487
4488 static void
4489 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4490 {
4491 int sp_reg;
4492 int fp_reg;
4493 int offset;
4494
4495 if (!unwind.proc_start)
4496 as_bad (MISSING_FNSTART);
4497
4498 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4499 if (skip_past_comma (&input_line_pointer) == FAIL)
4500 sp_reg = FAIL;
4501 else
4502 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4503
4504 if (fp_reg == FAIL || sp_reg == FAIL)
4505 {
4506 as_bad (_("expected <reg>, <reg>"));
4507 ignore_rest_of_line ();
4508 return;
4509 }
4510
4511 /* Optional constant. */
4512 if (skip_past_comma (&input_line_pointer) != FAIL)
4513 {
4514 if (immediate_for_directive (&offset) == FAIL)
4515 return;
4516 }
4517 else
4518 offset = 0;
4519
4520 demand_empty_rest_of_line ();
4521
4522 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4523 {
4524 as_bad (_("register must be either sp or set by a previous"
4525 "unwind_movsp directive"));
4526 return;
4527 }
4528
4529 /* Don't generate any opcodes, just record the information for later. */
4530 unwind.fp_reg = fp_reg;
4531 unwind.fp_used = 1;
4532 if (sp_reg == REG_SP)
4533 unwind.fp_offset = unwind.frame_size - offset;
4534 else
4535 unwind.fp_offset -= offset;
4536 }
4537
4538 /* Parse an unwind_raw directive. */
4539
4540 static void
4541 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4542 {
4543 expressionS exp;
4544 /* This is an arbitrary limit. */
4545 unsigned char op[16];
4546 int count;
4547
4548 if (!unwind.proc_start)
4549 as_bad (MISSING_FNSTART);
4550
4551 expression (&exp);
4552 if (exp.X_op == O_constant
4553 && skip_past_comma (&input_line_pointer) != FAIL)
4554 {
4555 unwind.frame_size += exp.X_add_number;
4556 expression (&exp);
4557 }
4558 else
4559 exp.X_op = O_illegal;
4560
4561 if (exp.X_op != O_constant)
4562 {
4563 as_bad (_("expected <offset>, <opcode>"));
4564 ignore_rest_of_line ();
4565 return;
4566 }
4567
4568 count = 0;
4569
4570 /* Parse the opcode. */
4571 for (;;)
4572 {
4573 if (count >= 16)
4574 {
4575 as_bad (_("unwind opcode too long"));
4576 ignore_rest_of_line ();
4577 }
4578 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4579 {
4580 as_bad (_("invalid unwind opcode"));
4581 ignore_rest_of_line ();
4582 return;
4583 }
4584 op[count++] = exp.X_add_number;
4585
4586 /* Parse the next byte. */
4587 if (skip_past_comma (&input_line_pointer) == FAIL)
4588 break;
4589
4590 expression (&exp);
4591 }
4592
4593 /* Add the opcode bytes in reverse order. */
4594 while (count--)
4595 add_unwind_opcode (op[count], 1);
4596
4597 demand_empty_rest_of_line ();
4598 }
4599
4600
4601 /* Parse a .eabi_attribute directive. */
4602
4603 static void
4604 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4605 {
4606 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4607
4608 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4609 attributes_set_explicitly[tag] = 1;
4610 }
4611
4612 /* Emit a tls fix for the symbol. */
4613
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fix is attached at the current output position without
     reserving any bytes: P points at the next free byte of the frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4635 #endif /* OBJ_ELF */
4636
4637 static void s_arm_arch (int);
4638 static void s_arm_object_arch (int);
4639 static void s_arm_cpu (int);
4640 static void s_arm_fpu (int);
4641 static void s_arm_arch_extension (int);
4642
4643 #ifdef TE_PE
4644
4645 static void
4646 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4647 {
4648 expressionS exp;
4649
4650 do
4651 {
4652 expression (&exp);
4653 if (exp.X_op == O_symbol)
4654 exp.X_op = O_secrel;
4655
4656 emit_expr (&exp, 4);
4657 }
4658 while (*input_line_pointer++ == ',');
4659
4660 input_line_pointer--;
4661 demand_empty_rest_of_line ();
4662 }
4663 #endif /* TE_PE */
4664
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align, 0 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
  /* ELF-specific directives: data/instruction emission with ARM
     relocations, plus the EABI unwinding directives.  */
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf. */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2. */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio. */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4744 \f
4745 /* Parser functions used exclusively in instruction operands. */
4746
4747 /* Generic immediate-value read function for use in insn parsing.
4748 STR points to the beginning of the immediate (the leading #);
4749 VAL receives the value; if the value is outside [MIN, MAX]
4750 issue an error. PREFIX_OPT is true if the immediate prefix is
4751 optional. */
4752
4753 static int
4754 parse_immediate (char **str, int *val, int min, int max,
4755 bfd_boolean prefix_opt)
4756 {
4757 expressionS exp;
4758 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4759 if (exp.X_op != O_constant)
4760 {
4761 inst.error = _("constant expression required");
4762 return FAIL;
4763 }
4764
4765 if (exp.X_add_number < min || exp.X_add_number > max)
4766 {
4767 inst.error = _("immediate value out of range");
4768 return FAIL;
4769 }
4770
4771 *val = exp.X_add_number;
4772 return SUCCESS;
4773 }
4774
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].
   On success the low 32 bits are in .imm; if the value needed more than
   32 bits the high 32 bits are in .reg and .regisimm is set.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied, else into a
     local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* Beyond bit 64 every littlenum must repeat its predecessor
	     (pure sign extension), otherwise the value does not fit.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4847
4848 /* Returns the pseudo-register number of an FPA immediate constant,
4849 or FAIL if there isn't a valid constant here. */
4850
4851 static int
4852 parse_fpa_immediate (char ** str)
4853 {
4854 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4855 char * save_in;
4856 expressionS exp;
4857 int i;
4858 int j;
4859
4860 /* First try and match exact strings, this is to guarantee
4861 that some formats will work even for cross assembly. */
4862
4863 for (i = 0; fp_const[i]; i++)
4864 {
4865 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4866 {
4867 char *start = *str;
4868
4869 *str += strlen (fp_const[i]);
4870 if (is_end_of_line[(unsigned char) **str])
4871 return i + 8;
4872 *str = start;
4873 }
4874 }
4875
4876 /* Just because we didn't get a match doesn't mean that the constant
4877 isn't valid, just that it is in a format that we don't
4878 automatically recognize. Try parsing it with the standard
4879 expression routines. */
4880
4881 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4882
4883 /* Look for a raw floating point number. */
4884 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4885 && is_end_of_line[(unsigned char) *save_in])
4886 {
4887 for (i = 0; i < NUM_FLOAT_VALS; i++)
4888 {
4889 for (j = 0; j < MAX_LITTLENUMS; j++)
4890 {
4891 if (words[j] != fp_values[i][j])
4892 break;
4893 }
4894
4895 if (j == MAX_LITTLENUMS)
4896 {
4897 *str = save_in;
4898 return i + 8;
4899 }
4900 }
4901 }
4902
4903 /* Try and parse a more complex expression, this will probably fail
4904 unless the code uses a floating point prefix (eg "0f"). */
4905 save_in = input_line_pointer;
4906 input_line_pointer = *str;
4907 if (expression (&exp) == absolute_section
4908 && exp.X_op == O_big
4909 && exp.X_add_number < 0)
4910 {
4911 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4912 Ditto for 15. */
4913 if (gen_to_words (words, 5, (long) 15) == 0)
4914 {
4915 for (i = 0; i < NUM_FLOAT_VALS; i++)
4916 {
4917 for (j = 0; j < MAX_LITTLENUMS; j++)
4918 {
4919 if (words[j] != fp_values[i][j])
4920 break;
4921 }
4922
4923 if (j == MAX_LITTLENUMS)
4924 {
4925 *str = input_line_pointer;
4926 input_line_pointer = save_in;
4927 return i + 8;
4928 }
4929 }
4930 }
4931 }
4932
4933 *str = input_line_pointer;
4934 input_line_pointer = save_in;
4935 inst.error = _("invalid FPA immediate expression");
4936 return FAIL;
4937 }
4938
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000 (the 8-bit immediate form
   used by Neon VMOV.F32 and friends).  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 30:25 must be the complement pattern selected by bit 29,
     and the low 19 fraction bits must all be clear.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
4948
4949
4950 /* Detect the presence of a floating point or integer zero constant,
4951 i.e. #0.0 or #0. */
4952
4953 static bfd_boolean
4954 parse_ifimm_zero (char **in)
4955 {
4956 int error_code;
4957
4958 if (!is_immediate_prefix (**in))
4959 return FALSE;
4960
4961 ++*in;
4962 error_code = atof_generic (in, ".", EXP_CHARS,
4963 &generic_floating_point_number);
4964
4965 if (!error_code
4966 && generic_floating_point_number.sign == '+'
4967 && (generic_floating_point_number.low
4968 > generic_floating_point_number.leader))
4969 return TRUE;
4970
4971 return FALSE;
4972 }
4973
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success the 32-bit single-precision
   image is stored in *IMMED and *CCP is advanced past the constant.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan to the end of the token looking for a '.' or an exponent
	 marker; without one this is an integer, not a float.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable values, plus +0.0/-0.0 (loadable as integers).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5037
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry mapping a shift mnemonic onto its shift_kind; looked up via
   the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5059
5060 /* Parse a <shift> specifier on an ARM data processing instruction.
5061 This has three forms:
5062
5063 (LSL|LSR|ASL|ASR|ROR) Rs
5064 (LSL|LSR|ASL|ASR|ROR) #imm
5065 RRX
5066
5067 Note that ASL is assimilated to LSL in the instruction encoding, and
5068 RRX to ROR #0 (which cannot be written as such). */
5069
5070 static int
5071 parse_shift (char **str, int i, enum parse_shift_mode mode)
5072 {
5073 const struct asm_shift_name *shift_name;
5074 enum shift_kind shift;
5075 char *s = *str;
5076 char *p = s;
5077 int reg;
5078
5079 for (p = *str; ISALPHA (*p); p++)
5080 ;
5081
5082 if (p == *str)
5083 {
5084 inst.error = _("shift expression expected");
5085 return FAIL;
5086 }
5087
5088 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5089 p - *str);
5090
5091 if (shift_name == NULL)
5092 {
5093 inst.error = _("shift expression expected");
5094 return FAIL;
5095 }
5096
5097 shift = shift_name->kind;
5098
5099 switch (mode)
5100 {
5101 case NO_SHIFT_RESTRICT:
5102 case SHIFT_IMMEDIATE: break;
5103
5104 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5105 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5106 {
5107 inst.error = _("'LSL' or 'ASR' required");
5108 return FAIL;
5109 }
5110 break;
5111
5112 case SHIFT_LSL_IMMEDIATE:
5113 if (shift != SHIFT_LSL)
5114 {
5115 inst.error = _("'LSL' required");
5116 return FAIL;
5117 }
5118 break;
5119
5120 case SHIFT_ASR_IMMEDIATE:
5121 if (shift != SHIFT_ASR)
5122 {
5123 inst.error = _("'ASR' required");
5124 return FAIL;
5125 }
5126 break;
5127
5128 default: abort ();
5129 }
5130
5131 if (shift != SHIFT_RRX)
5132 {
5133 /* Whitespace can appear here if the next thing is a bare digit. */
5134 skip_whitespace (p);
5135
5136 if (mode == NO_SHIFT_RESTRICT
5137 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5138 {
5139 inst.operands[i].imm = reg;
5140 inst.operands[i].immisreg = 1;
5141 }
5142 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5143 return FAIL;
5144 }
5145 inst.operands[i].shift_kind = shift;
5146 inst.operands[i].shifted = 1;
5147 *str = p;
5148 return SUCCESS;
5149 }
5150
5151 /* Parse a <shifter_operand> for an ARM data processing instruction:
5152
5153 #<immediate>
5154 #<immediate>, <rotate>
5155 <Rm>
5156 <Rm>, <shift>
5157
5158 where <shift> is defined by parse_shift above, and <rotate> is a
5159 multiple of 2 between 0 and 30. Validation of immediate operands
5160 is deferred to md_apply_fix. */
5161
5162 static int
5163 parse_shifter_operand (char **str, int i)
5164 {
5165 int value;
5166 expressionS exp;
5167
5168 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5169 {
5170 inst.operands[i].reg = value;
5171 inst.operands[i].isreg = 1;
5172
5173 /* parse_shift will override this if appropriate */
5174 inst.reloc.exp.X_op = O_constant;
5175 inst.reloc.exp.X_add_number = 0;
5176
5177 if (skip_past_comma (str) == FAIL)
5178 return SUCCESS;
5179
5180 /* Shift operation on register. */
5181 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5182 }
5183
5184 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5185 return FAIL;
5186
5187 if (skip_past_comma (str) == SUCCESS)
5188 {
5189 /* #x, y -- ie explicit rotation by Y. */
5190 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5191 return FAIL;
5192
5193 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5194 {
5195 inst.error = _("constant expression expected");
5196 return FAIL;
5197 }
5198
5199 value = exp.X_add_number;
5200 if (value < 0 || value > 30 || value % 2 != 0)
5201 {
5202 inst.error = _("invalid rotation");
5203 return FAIL;
5204 }
5205 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5206 {
5207 inst.error = _("invalid constant");
5208 return FAIL;
5209 }
5210
5211 /* Encode as specified. */
5212 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5213 return SUCCESS;
5214 }
5215
5216 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5217 inst.reloc.pc_rel = 0;
5218 return SUCCESS;
5219 }
5220
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  /* Each *_code below is a BFD_RELOC_* value, or 0 when the group
     relocation cannot be used with that class of instruction (the
     parsers reject a looked-up code of 0).  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
{ /* Program counter relative: */
  { "pc_g0_nc",
    BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g0",
    BFD_RELOC_ARM_ALU_PC_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
  { "pc_g1_nc",
    BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g1",
    BFD_RELOC_ARM_ALU_PC_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
  { "pc_g2",
    BFD_RELOC_ARM_ALU_PC_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
  /* Section base relative */
  { "sb_g0_nc",
    BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g0",
    BFD_RELOC_ARM_ALU_SB_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
  { "sb_g1_nc",
    BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g1",
    BFD_RELOC_ARM_ALU_SB_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
  { "sb_g2",
    BFD_RELOC_ARM_ALU_SB_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
5299
5300 /* Given the address of a pointer pointing to the textual name of a group
5301 relocation as may appear in assembler source, attempt to find its details
5302 in group_reloc_table. The pointer will be updated to the character after
5303 the trailing colon. On failure, FAIL will be returned; SUCCESS
5304 otherwise. On success, *entry will be updated to point at the relevant
5305 group_reloc_table entry. */
5306
5307 static int
5308 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5309 {
5310 unsigned int i;
5311 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5312 {
5313 int length = strlen (group_reloc_table[i].name);
5314
5315 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5316 && (*str)[length] == ':')
5317 {
5318 *out = &group_reloc_table[i];
5319 *str += (length + 1);
5320 return SUCCESS;
5321 }
5322 }
5323
5324 return FAIL;
5325 }
5326
5327 /* Parse a <shifter_operand> for an ARM data processing instruction
5328 (as for parse_shifter_operand) where group relocations are allowed:
5329
5330 #<immediate>
5331 #<immediate>, <rotate>
5332 #:<group_reloc>:<expression>
5333 <Rm>
5334 <Rm>, <shift>
5335
5336 where <group_reloc> is one of the strings defined in group_reloc_table.
5337 The hashes are optional.
5338
5339 Everything else is as for parse_shifter_operand. */
5340
5341 static parse_operand_result
5342 parse_shifter_operand_group_reloc (char **str, int i)
5343 {
5344 /* Determine if we have the sequence of characters #: or just :
5345 coming next. If we do, then we check for a group relocation.
5346 If we don't, punt the whole lot to parse_shifter_operand. */
5347
5348 if (((*str)[0] == '#' && (*str)[1] == ':')
5349 || (*str)[0] == ':')
5350 {
5351 struct group_reloc_table_entry *entry;
5352
5353 if ((*str)[0] == '#')
5354 (*str) += 2;
5355 else
5356 (*str)++;
5357
5358 /* Try to parse a group relocation. Anything else is an error. */
5359 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5360 {
5361 inst.error = _("unknown group relocation");
5362 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5363 }
5364
5365 /* We now have the group relocation table entry corresponding to
5366 the name in the assembler source. Next, we parse the expression. */
5367 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5368 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5369
5370 /* Record the relocation type (always the ALU variant here). */
5371 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5372 gas_assert (inst.reloc.type != 0);
5373
5374 return PARSE_OPERAND_SUCCESS;
5375 }
5376 else
5377 return parse_shifter_operand (str, i) == SUCCESS
5378 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5379
5380 /* Never reached. */
5381 }
5382
5383 /* Parse a Neon alignment expression. Information is written to
5384 inst.operands[i]. We assume the initial ':' has been skipped.
5385
5386 align .imm = align << 8, .immisalign=1, .preind=0 */
5387 static parse_operand_result
5388 parse_neon_alignment (char **str, int i)
5389 {
5390 char *p = *str;
5391 expressionS exp;
5392
5393 my_get_expression (&exp, &p, GE_NO_PREFIX);
5394
5395 if (exp.X_op != O_constant)
5396 {
5397 inst.error = _("alignment must be constant");
5398 return PARSE_OPERAND_FAIL;
5399 }
5400
5401 inst.operands[i].imm = exp.X_add_number << 8;
5402 inst.operands[i].immisalign = 1;
5403 /* Alignments are not pre-indexes. */
5404 inst.operands[i].preind = 0;
5405
5406 *str = p;
5407 return PARSE_OPERAND_SUCCESS;
5408 }
5409
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory inside '['.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* "[Rn," — the pre-indexed forms.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register offset: back up over any '-' we consumed;
	     the sign of a zero offset is re-detected below.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A table code of 0 means this group relocation has no
		 variant for this class of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback on a pre-indexed form.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* "[Rn], ..." — post-indexed and unindexed forms.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5693
5694 static int
5695 parse_address (char **str, int i)
5696 {
5697 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5698 ? SUCCESS : FAIL;
5699 }
5700
5701 static parse_operand_result
5702 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5703 {
5704 return parse_address_main (str, i, 1, type);
5705 }
5706
5707 /* Parse an operand for a MOVW or MOVT instruction. */
5708 static int
5709 parse_half (char **str)
5710 {
5711 char * p;
5712
5713 p = *str;
5714 skip_past_char (&p, '#');
5715 if (strncasecmp (p, ":lower16:", 9) == 0)
5716 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5717 else if (strncasecmp (p, ":upper16:", 9) == 0)
5718 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5719
5720 if (inst.reloc.type != BFD_RELOC_UNUSED)
5721 {
5722 p += 9;
5723 skip_whitespace (p);
5724 }
5725
5726 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5727 return FAIL;
5728
5729 if (inst.reloc.type == BFD_RELOC_UNUSED)
5730 {
5731 if (inst.reloc.exp.X_op != O_constant)
5732 {
5733 inst.error = _("constant expression expected");
5734 return FAIL;
5735 }
5736 if (inst.reloc.exp.X_add_number < 0
5737 || inst.reloc.exp.X_add_number > 0xffff)
5738 {
5739 inst.error = _("immediate value out of range");
5740 return FAIL;
5741 }
5742 }
5743 *str = p;
5744 return SUCCESS;
5745 }
5746
5747 /* Miscellaneous. */
5748
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   *STR points at the operand text and is advanced past it on success.
   LHS is TRUE when parsing the destination of an MSR (a write); it
   controls whether the PSR_f mask bit is implied for M-profile and
   bare-APSR forms.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores use a different register set (looked up in
     arm_v7m_psr_hsh below) and reject CPSR/SPSR outright.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (selected_cpu.core == arm_arch_any.core)
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Scan the whole identifier (letters, digits, underscores).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the xPSR family the register name proper ends at the final
	 'r'/'R'; anything beyond it is a suffix handled at check_suffix.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the "SPSR"/"CPSR"/"APSR" just matched.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Collect the bits named in the suffix.  Naming any of n/z/c/v/q
	     twice sets 0x20 (and 'g' twice sets 0x2), which the validity
	     check below rejects.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* 0x1f means all five of n, z, c, v and q were given: PSR_f.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE bits) group needs the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the whole suffix up in the PSR-flags table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5945
5946 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5947 value suitable for splatting into the AIF field of the instruction. */
5948
5949 static int
5950 parse_cps_flags (char **str)
5951 {
5952 int val = 0;
5953 int saw_a_flag = 0;
5954 char *s = *str;
5955
5956 for (;;)
5957 switch (*s++)
5958 {
5959 case '\0': case ',':
5960 goto done;
5961
5962 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5963 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5964 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5965
5966 default:
5967 inst.error = _("unrecognized CPS flag");
5968 return FAIL;
5969 }
5970
5971 done:
5972 if (saw_a_flag == 0)
5973 {
5974 inst.error = _("missing CPS flags");
5975 return FAIL;
5976 }
5977
5978 *str = s - 1;
5979 return val;
5980 }
5981
5982 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5983 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5984
5985 static int
5986 parse_endian_specifier (char **str)
5987 {
5988 int little_endian;
5989 char *s = *str;
5990
5991 if (strncasecmp (s, "BE", 2))
5992 little_endian = 0;
5993 else if (strncasecmp (s, "LE", 2))
5994 little_endian = 1;
5995 else
5996 {
5997 inst.error = _("valid endian specifiers are be or le");
5998 return FAIL;
5999 }
6000
6001 if (ISALNUM (s[2]) || s[2] == '_')
6002 {
6003 inst.error = _("valid endian specifiers are be or le");
6004 return FAIL;
6005 }
6006
6007 *str = s + 2;
6008 return little_endian;
6009 }
6010
6011 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6012 value suitable for poking into the rotate field of an sxt or sxta
6013 instruction, or FAIL on error. */
6014
6015 static int
6016 parse_ror (char **str)
6017 {
6018 int rot;
6019 char *s = *str;
6020
6021 if (strncasecmp (s, "ROR", 3) == 0)
6022 s += 3;
6023 else
6024 {
6025 inst.error = _("missing rotation field after comma");
6026 return FAIL;
6027 }
6028
6029 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6030 return FAIL;
6031
6032 switch (rot)
6033 {
6034 case 0: *str = s; return 0x0;
6035 case 8: *str = s; return 0x1;
6036 case 16: *str = s; return 0x2;
6037 case 24: *str = s; return 0x3;
6038
6039 default:
6040 inst.error = _("rotation can only be 0, 8, 16, or 24");
6041 return FAIL;
6042 }
6043 }
6044
6045 /* Parse a conditional code (from conds[] below). The value returned is in the
6046 range 0 .. 14, or FAIL. */
6047 static int
6048 parse_cond (char **str)
6049 {
6050 char *q;
6051 const struct asm_cond *c;
6052 int n;
6053 /* Condition codes are always 2 characters, so matching up to
6054 3 characters is sufficient. */
6055 char cond[3];
6056
6057 q = *str;
6058 n = 0;
6059 while (ISALPHA (*q) && n < 3)
6060 {
6061 cond[n] = TOLOWER (*q);
6062 q++;
6063 n++;
6064 }
6065
6066 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6067 if (!c)
6068 {
6069 inst.error = _("condition required");
6070 return FAIL;
6071 }
6072
6073 *str = q;
6074 return c->value;
6075 }
6076
6077 /* If the given feature available in the selected CPU, mark it as used.
6078 Returns TRUE iff feature is available. */
6079 static bfd_boolean
6080 mark_feature_used (const arm_feature_set *feature)
6081 {
6082 /* Ensure the option is valid on the current architecture. */
6083 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6084 return FALSE;
6085
6086 /* Add the appropriate architecture feature for the barrier option used.
6087 */
6088 if (thumb_mode)
6089 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6090 else
6091 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6092
6093 return TRUE;
6094 }
6095
6096 /* Parse an option for a barrier instruction. Returns the encoding for the
6097 option, or FAIL. */
6098 static int
6099 parse_barrier (char **str)
6100 {
6101 char *p, *q;
6102 const struct asm_barrier_opt *o;
6103
6104 p = q = *str;
6105 while (ISALPHA (*q))
6106 q++;
6107
6108 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6109 q - p);
6110 if (!o)
6111 return FAIL;
6112
6113 if (!mark_feature_used (&o->arch))
6114 return FAIL;
6115
6116 *str = q;
6117 return o->value;
6118 }
6119
6120 /* Parse the operands of a table branch instruction. Similar to a memory
6121 operand. */
6122 static int
6123 parse_tb (char **str)
6124 {
6125 char * p = *str;
6126 int reg;
6127
6128 if (skip_past_char (&p, '[') == FAIL)
6129 {
6130 inst.error = _("'[' expected");
6131 return FAIL;
6132 }
6133
6134 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6135 {
6136 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6137 return FAIL;
6138 }
6139 inst.operands[0].reg = reg;
6140
6141 if (skip_past_comma (&p) == FAIL)
6142 {
6143 inst.error = _("',' expected");
6144 return FAIL;
6145 }
6146
6147 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6148 {
6149 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6150 return FAIL;
6151 }
6152 inst.operands[0].imm = reg;
6153
6154 if (skip_past_comma (&p) == SUCCESS)
6155 {
6156 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6157 return FAIL;
6158 if (inst.reloc.exp.X_add_number != 1)
6159 {
6160 inst.error = _("invalid shift");
6161 return FAIL;
6162 }
6163 inst.operands[0].shifted = 1;
6164 }
6165
6166 if (skip_past_char (&p, ']') == FAIL)
6167 {
6168 inst.error = _("']' expected");
6169 return FAIL;
6170 }
6171 *str = p;
6172 return SUCCESS;
6173 }
6174
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[] and is advanced as operands are consumed;
     the final value is handed back through WHICH_OPERAND on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* A D destination needs a second core register (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two further core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6397
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the low
   16 bits of the combined value and the Thumb code the high 16 bits;
   parse_operands selects the relevant half.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* "Either/or" codes: a register of the named kind, or an immediate or
     scalar alternative, distinguished at parse time.  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/* 0 .. 15 */
  OP_I16,	/* 1 .. 16 */
  OP_I16z,	/* 0 .. 16 */
  OP_I31,	/* 0 .. 31 */
  OP_I31w,	/* 0 .. 31, optional trailing ! */
  OP_I32,	/* 1 .. 32 */
  OP_I32z,	/* 0 .. 32 */
  OP_I63,	/* 0 .. 63 */
  OP_I63s,	/* -64 .. 63 */
  OP_I64,	/* 1 .. 64 */
  OP_I64z,	/* 0 .. 64 */
  OP_I255,	/* 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/* 0 .. 7 */
  OP_I15b,	/* 0 .. 15 */
  OP_I31b,	/* 0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/* 0 .. 31 */
  OP_oI32b,	/* 1 .. 32 */
  OP_oI32z,	/* 0 .. 32 */
  OP_oIffffb,	/* 0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Codes at or above this value are optional: parse_operands records a
     backtrack point before attempting to match them.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6533
6534 /* Generic instruction operand parser. This does no encoding and no
6535 semantic validation; it merely squirrels values away in the inst
6536 structure. Returns SUCCESS or FAIL depending on whether the
6537 specified grammar matched. */
6538 static int
6539 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6540 {
6541 unsigned const int *upat = pattern;
6542 char *backtrack_pos = 0;
6543 const char *backtrack_error = 0;
6544 int i, val = 0, backtrack_index = 0;
6545 enum arm_reg_type rtype;
6546 parse_operand_result result;
6547 unsigned int op_parse_code;
6548
6549 #define po_char_or_fail(chr) \
6550 do \
6551 { \
6552 if (skip_past_char (&str, chr) == FAIL) \
6553 goto bad_args; \
6554 } \
6555 while (0)
6556
6557 #define po_reg_or_fail(regtype) \
6558 do \
6559 { \
6560 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6561 & inst.operands[i].vectype); \
6562 if (val == FAIL) \
6563 { \
6564 first_error (_(reg_expected_msgs[regtype])); \
6565 goto failure; \
6566 } \
6567 inst.operands[i].reg = val; \
6568 inst.operands[i].isreg = 1; \
6569 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6570 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6571 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6572 || rtype == REG_TYPE_VFD \
6573 || rtype == REG_TYPE_NQ); \
6574 } \
6575 while (0)
6576
6577 #define po_reg_or_goto(regtype, label) \
6578 do \
6579 { \
6580 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6581 & inst.operands[i].vectype); \
6582 if (val == FAIL) \
6583 goto label; \
6584 \
6585 inst.operands[i].reg = val; \
6586 inst.operands[i].isreg = 1; \
6587 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6588 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6589 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6590 || rtype == REG_TYPE_VFD \
6591 || rtype == REG_TYPE_NQ); \
6592 } \
6593 while (0)
6594
6595 #define po_imm_or_fail(min, max, popt) \
6596 do \
6597 { \
6598 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6599 goto failure; \
6600 inst.operands[i].imm = val; \
6601 } \
6602 while (0)
6603
6604 #define po_scalar_or_goto(elsz, label) \
6605 do \
6606 { \
6607 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6608 if (val == FAIL) \
6609 goto label; \
6610 inst.operands[i].reg = val; \
6611 inst.operands[i].isscalar = 1; \
6612 } \
6613 while (0)
6614
6615 #define po_misc_or_fail(expr) \
6616 do \
6617 { \
6618 if (expr) \
6619 goto failure; \
6620 } \
6621 while (0)
6622
6623 #define po_misc_or_fail_no_backtrack(expr) \
6624 do \
6625 { \
6626 result = expr; \
6627 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6628 backtrack_pos = 0; \
6629 if (result != PARSE_OPERAND_SUCCESS) \
6630 goto failure; \
6631 } \
6632 while (0)
6633
6634 #define po_barrier_or_imm(str) \
6635 do \
6636 { \
6637 val = parse_barrier (&str); \
6638 if (val == FAIL && ! ISALPHA (*str)) \
6639 goto immediate; \
6640 if (val == FAIL \
6641 /* ISB can only take SY as an option. */ \
6642 || ((inst.instruction & 0xf0) == 0x60 \
6643 && val != 0xf)) \
6644 { \
6645 inst.error = _("invalid barrier type"); \
6646 backtrack_pos = 0; \
6647 goto failure; \
6648 } \
6649 } \
6650 while (0)
6651
6652 skip_whitespace (str);
6653
6654 for (i = 0; upat[i] != OP_stop; i++)
6655 {
6656 op_parse_code = upat[i];
6657 if (op_parse_code >= 1<<16)
6658 op_parse_code = thumb ? (op_parse_code >> 16)
6659 : (op_parse_code & ((1<<16)-1));
6660
6661 if (op_parse_code >= OP_FIRST_OPTIONAL)
6662 {
6663 /* Remember where we are in case we need to backtrack. */
6664 gas_assert (!backtrack_pos);
6665 backtrack_pos = str;
6666 backtrack_error = inst.error;
6667 backtrack_index = i;
6668 }
6669
6670 if (i > 0 && (i > 1 || inst.operands[0].present))
6671 po_char_or_fail (',');
6672
6673 switch (op_parse_code)
6674 {
6675 /* Registers */
6676 case OP_oRRnpc:
6677 case OP_oRRnpcsp:
6678 case OP_RRnpc:
6679 case OP_RRnpcsp:
6680 case OP_oRR:
6681 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6682 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6683 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6684 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6685 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6686 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6687 case OP_oRND:
6688 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6689 case OP_RVC:
6690 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6691 break;
6692 /* Also accept generic coprocessor regs for unknown registers. */
6693 coproc_reg:
6694 po_reg_or_fail (REG_TYPE_CN);
6695 break;
6696 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6697 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6698 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6699 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6700 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6701 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6702 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6703 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6704 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6705 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6706 case OP_oRNQ:
6707 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6708 case OP_oRNDQ:
6709 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6710 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6711 case OP_oRNSDQ:
6712 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6713
6714 /* Neon scalar. Using an element size of 8 means that some invalid
6715 scalars are accepted here, so deal with those in later code. */
6716 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6717
6718 case OP_RNDQ_I0:
6719 {
6720 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6721 break;
6722 try_imm0:
6723 po_imm_or_fail (0, 0, TRUE);
6724 }
6725 break;
6726
6727 case OP_RVSD_I0:
6728 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6729 break;
6730
6731 case OP_RSVD_FI0:
6732 {
6733 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6734 break;
6735 try_ifimm0:
6736 if (parse_ifimm_zero (&str))
6737 inst.operands[i].imm = 0;
6738 else
6739 {
6740 inst.error
6741 = _("only floating point zero is allowed as immediate value");
6742 goto failure;
6743 }
6744 }
6745 break;
6746
6747 case OP_RR_RNSC:
6748 {
6749 po_scalar_or_goto (8, try_rr);
6750 break;
6751 try_rr:
6752 po_reg_or_fail (REG_TYPE_RN);
6753 }
6754 break;
6755
6756 case OP_RNSDQ_RNSC:
6757 {
6758 po_scalar_or_goto (8, try_nsdq);
6759 break;
6760 try_nsdq:
6761 po_reg_or_fail (REG_TYPE_NSDQ);
6762 }
6763 break;
6764
6765 case OP_RNDQ_RNSC:
6766 {
6767 po_scalar_or_goto (8, try_ndq);
6768 break;
6769 try_ndq:
6770 po_reg_or_fail (REG_TYPE_NDQ);
6771 }
6772 break;
6773
6774 case OP_RND_RNSC:
6775 {
6776 po_scalar_or_goto (8, try_vfd);
6777 break;
6778 try_vfd:
6779 po_reg_or_fail (REG_TYPE_VFD);
6780 }
6781 break;
6782
6783 case OP_VMOV:
6784 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6785 not careful then bad things might happen. */
6786 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6787 break;
6788
6789 case OP_RNDQ_Ibig:
6790 {
6791 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6792 break;
6793 try_immbig:
6794 /* There's a possibility of getting a 64-bit immediate here, so
6795 we need special handling. */
6796 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6797 == FAIL)
6798 {
6799 inst.error = _("immediate value is out of range");
6800 goto failure;
6801 }
6802 }
6803 break;
6804
6805 case OP_RNDQ_I63b:
6806 {
6807 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6808 break;
6809 try_shimm:
6810 po_imm_or_fail (0, 63, TRUE);
6811 }
6812 break;
6813
6814 case OP_RRnpcb:
6815 po_char_or_fail ('[');
6816 po_reg_or_fail (REG_TYPE_RN);
6817 po_char_or_fail (']');
6818 break;
6819
6820 case OP_RRnpctw:
6821 case OP_RRw:
6822 case OP_oRRw:
6823 po_reg_or_fail (REG_TYPE_RN);
6824 if (skip_past_char (&str, '!') == SUCCESS)
6825 inst.operands[i].writeback = 1;
6826 break;
6827
6828 /* Immediates */
6829 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6830 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6831 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6832 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6833 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6834 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6835 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6836 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6837 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6838 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6839 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6840 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6841
6842 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6843 case OP_oI7b:
6844 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6845 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6846 case OP_oI31b:
6847 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6848 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6849 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6850 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6851
6852 /* Immediate variants */
6853 case OP_oI255c:
6854 po_char_or_fail ('{');
6855 po_imm_or_fail (0, 255, TRUE);
6856 po_char_or_fail ('}');
6857 break;
6858
6859 case OP_I31w:
6860 /* The expression parser chokes on a trailing !, so we have
6861 to find it first and zap it. */
6862 {
6863 char *s = str;
6864 while (*s && *s != ',')
6865 s++;
6866 if (s[-1] == '!')
6867 {
6868 s[-1] = '\0';
6869 inst.operands[i].writeback = 1;
6870 }
6871 po_imm_or_fail (0, 31, TRUE);
6872 if (str == s - 1)
6873 str = s;
6874 }
6875 break;
6876
6877 /* Expressions */
6878 case OP_EXPi: EXPi:
6879 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6880 GE_OPT_PREFIX));
6881 break;
6882
6883 case OP_EXP:
6884 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6885 GE_NO_PREFIX));
6886 break;
6887
6888 case OP_EXPr: EXPr:
6889 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6890 GE_NO_PREFIX));
6891 if (inst.reloc.exp.X_op == O_symbol)
6892 {
6893 val = parse_reloc (&str);
6894 if (val == -1)
6895 {
6896 inst.error = _("unrecognized relocation suffix");
6897 goto failure;
6898 }
6899 else if (val != BFD_RELOC_UNUSED)
6900 {
6901 inst.operands[i].imm = val;
6902 inst.operands[i].hasreloc = 1;
6903 }
6904 }
6905 break;
6906
6907 /* Operand for MOVW or MOVT. */
6908 case OP_HALF:
6909 po_misc_or_fail (parse_half (&str));
6910 break;
6911
6912 /* Register or expression. */
6913 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6914 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6915
6916 /* Register or immediate. */
6917 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6918 I0: po_imm_or_fail (0, 0, FALSE); break;
6919
6920 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6921 IF:
6922 if (!is_immediate_prefix (*str))
6923 goto bad_args;
6924 str++;
6925 val = parse_fpa_immediate (&str);
6926 if (val == FAIL)
6927 goto failure;
6928 /* FPA immediates are encoded as registers 8-15.
6929 parse_fpa_immediate has already applied the offset. */
6930 inst.operands[i].reg = val;
6931 inst.operands[i].isreg = 1;
6932 break;
6933
6934 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6935 I32z: po_imm_or_fail (0, 32, FALSE); break;
6936
6937 /* Two kinds of register. */
6938 case OP_RIWR_RIWC:
6939 {
6940 struct reg_entry *rege = arm_reg_parse_multi (&str);
6941 if (!rege
6942 || (rege->type != REG_TYPE_MMXWR
6943 && rege->type != REG_TYPE_MMXWC
6944 && rege->type != REG_TYPE_MMXWCG))
6945 {
6946 inst.error = _("iWMMXt data or control register expected");
6947 goto failure;
6948 }
6949 inst.operands[i].reg = rege->number;
6950 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6951 }
6952 break;
6953
6954 case OP_RIWC_RIWG:
6955 {
6956 struct reg_entry *rege = arm_reg_parse_multi (&str);
6957 if (!rege
6958 || (rege->type != REG_TYPE_MMXWC
6959 && rege->type != REG_TYPE_MMXWCG))
6960 {
6961 inst.error = _("iWMMXt control register expected");
6962 goto failure;
6963 }
6964 inst.operands[i].reg = rege->number;
6965 inst.operands[i].isreg = 1;
6966 }
6967 break;
6968
6969 /* Misc */
6970 case OP_CPSF: val = parse_cps_flags (&str); break;
6971 case OP_ENDI: val = parse_endian_specifier (&str); break;
6972 case OP_oROR: val = parse_ror (&str); break;
6973 case OP_COND: val = parse_cond (&str); break;
6974 case OP_oBARRIER_I15:
6975 po_barrier_or_imm (str); break;
6976 immediate:
6977 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6978 goto failure;
6979 break;
6980
6981 case OP_wPSR:
6982 case OP_rPSR:
6983 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6984 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6985 {
6986 inst.error = _("Banked registers are not available with this "
6987 "architecture.");
6988 goto failure;
6989 }
6990 break;
6991 try_psr:
6992 val = parse_psr (&str, op_parse_code == OP_wPSR);
6993 break;
6994
6995 case OP_APSR_RR:
6996 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6997 break;
6998 try_apsr:
6999 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7000 instruction). */
7001 if (strncasecmp (str, "APSR_", 5) == 0)
7002 {
7003 unsigned found = 0;
7004 str += 5;
7005 while (found < 15)
7006 switch (*str++)
7007 {
7008 case 'c': found = (found & 1) ? 16 : found | 1; break;
7009 case 'n': found = (found & 2) ? 16 : found | 2; break;
7010 case 'z': found = (found & 4) ? 16 : found | 4; break;
7011 case 'v': found = (found & 8) ? 16 : found | 8; break;
7012 default: found = 16;
7013 }
7014 if (found != 15)
7015 goto failure;
7016 inst.operands[i].isvec = 1;
7017 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7018 inst.operands[i].reg = REG_PC;
7019 }
7020 else
7021 goto failure;
7022 break;
7023
7024 case OP_TB:
7025 po_misc_or_fail (parse_tb (&str));
7026 break;
7027
7028 /* Register lists. */
7029 case OP_REGLST:
7030 val = parse_reg_list (&str);
7031 if (*str == '^')
7032 {
7033 inst.operands[1].writeback = 1;
7034 str++;
7035 }
7036 break;
7037
7038 case OP_VRSLST:
7039 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7040 break;
7041
7042 case OP_VRDLST:
7043 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7044 break;
7045
7046 case OP_VRSDLST:
7047 /* Allow Q registers too. */
7048 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7049 REGLIST_NEON_D);
7050 if (val == FAIL)
7051 {
7052 inst.error = NULL;
7053 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7054 REGLIST_VFP_S);
7055 inst.operands[i].issingle = 1;
7056 }
7057 break;
7058
7059 case OP_NRDLST:
7060 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7061 REGLIST_NEON_D);
7062 break;
7063
7064 case OP_NSTRLST:
7065 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7066 &inst.operands[i].vectype);
7067 break;
7068
7069 /* Addressing modes */
7070 case OP_ADDR:
7071 po_misc_or_fail (parse_address (&str, i));
7072 break;
7073
7074 case OP_ADDRGLDR:
7075 po_misc_or_fail_no_backtrack (
7076 parse_address_group_reloc (&str, i, GROUP_LDR));
7077 break;
7078
7079 case OP_ADDRGLDRS:
7080 po_misc_or_fail_no_backtrack (
7081 parse_address_group_reloc (&str, i, GROUP_LDRS));
7082 break;
7083
7084 case OP_ADDRGLDC:
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str, i, GROUP_LDC));
7087 break;
7088
7089 case OP_SH:
7090 po_misc_or_fail (parse_shifter_operand (&str, i));
7091 break;
7092
7093 case OP_SHG:
7094 po_misc_or_fail_no_backtrack (
7095 parse_shifter_operand_group_reloc (&str, i));
7096 break;
7097
7098 case OP_oSHll:
7099 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7100 break;
7101
7102 case OP_oSHar:
7103 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7104 break;
7105
7106 case OP_oSHllar:
7107 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7108 break;
7109
7110 default:
7111 as_fatal (_("unhandled operand code %d"), op_parse_code);
7112 }
7113
7114 /* Various value-based sanity checks and shared operations. We
7115 do not signal immediate failures for the register constraints;
7116 this allows a syntax error to take precedence. */
7117 switch (op_parse_code)
7118 {
7119 case OP_oRRnpc:
7120 case OP_RRnpc:
7121 case OP_RRnpcb:
7122 case OP_RRw:
7123 case OP_oRRw:
7124 case OP_RRnpc_I0:
7125 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7126 inst.error = BAD_PC;
7127 break;
7128
7129 case OP_oRRnpcsp:
7130 case OP_RRnpcsp:
7131 if (inst.operands[i].isreg)
7132 {
7133 if (inst.operands[i].reg == REG_PC)
7134 inst.error = BAD_PC;
7135 else if (inst.operands[i].reg == REG_SP)
7136 inst.error = BAD_SP;
7137 }
7138 break;
7139
7140 case OP_RRnpctw:
7141 if (inst.operands[i].isreg
7142 && inst.operands[i].reg == REG_PC
7143 && (inst.operands[i].writeback || thumb))
7144 inst.error = BAD_PC;
7145 break;
7146
7147 case OP_CPSF:
7148 case OP_ENDI:
7149 case OP_oROR:
7150 case OP_wPSR:
7151 case OP_rPSR:
7152 case OP_COND:
7153 case OP_oBARRIER_I15:
7154 case OP_REGLST:
7155 case OP_VRSLST:
7156 case OP_VRDLST:
7157 case OP_VRSDLST:
7158 case OP_NRDLST:
7159 case OP_NSTRLST:
7160 if (val == FAIL)
7161 goto failure;
7162 inst.operands[i].imm = val;
7163 break;
7164
7165 default:
7166 break;
7167 }
7168
7169 /* If we get here, this operand was successfully parsed. */
7170 inst.operands[i].present = 1;
7171 continue;
7172
7173 bad_args:
7174 inst.error = BAD_ARGS;
7175
7176 failure:
7177 if (!backtrack_pos)
7178 {
7179 /* The parse routine should already have set inst.error, but set a
7180 default here just in case. */
7181 if (!inst.error)
7182 inst.error = _("syntax error");
7183 return FAIL;
7184 }
7185
7186 /* Do not backtrack over a trailing optional argument that
7187 absorbed some text. We will only fail again, with the
7188 'garbage following instruction' error message, which is
7189 probably less helpful than the current one. */
7190 if (backtrack_index == i && backtrack_pos != str
7191 && upat[i+1] == OP_stop)
7192 {
7193 if (!inst.error)
7194 inst.error = _("syntax error");
7195 return FAIL;
7196 }
7197
7198 /* Try again, skipping the optional argument at backtrack_pos. */
7199 str = backtrack_pos;
7200 inst.error = backtrack_error;
7201 inst.operands[backtrack_index].present = 0;
7202 i = backtrack_index;
7203 backtrack_pos = 0;
7204 }
7205
7206 /* Check that we have parsed all the arguments. */
7207 if (*str != '\0' && !inst.error)
7208 inst.error = _("garbage following instruction");
7209
7210 return inst.error ? FAIL : SUCCESS;
7211 }
7212
7213 #undef po_char_or_fail
7214 #undef po_reg_or_fail
7215 #undef po_reg_or_goto
7216 #undef po_imm_or_fail
7217 #undef po_scalar_or_fail
7218 #undef po_barrier_or_imm
7219
/* Shorthand macro for instruction encoding functions issuing errors.
   Records ERR in inst.error and returns from the enclosing function
   when EXPR is true; because the return is bare, this may only be
   used inside functions returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7231
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error and returns on violation, so — like constraint —
   it may only be used inside functions returning void.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7243
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  This only emits a warning (gated on the global
   warn_on_deprecated flag); it never rejects the instruction.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_warn (_("use of r13 is deprecated"));	\
  while (0)
7251
7252 /* Functions for operand encoding. ARM, then Thumb. */
7253
/* Rotate the 32-bit value V left by N bits.  Masking both shift counts
   with 31 avoids undefined behaviour when N is 0 (a right shift by 32,
   the full width of the type, is UB in C) while leaving the result for
   N == 0 unchanged: V << 0 | V >> 0 == V.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7255
7256 /* If VAL can be encoded in the immediate field of an ARM instruction,
7257 return the encoded form. Otherwise, return FAIL. */
7258
7259 static unsigned int
7260 encode_arm_immediate (unsigned int val)
7261 {
7262 unsigned int a, i;
7263
7264 for (i = 0; i < 32; i += 2)
7265 if ((a = rotate_left (val, i)) <= 0xff)
7266 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7267
7268 return FAIL;
7269 }
7270
7271 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7272 return the encoded form. Otherwise, return FAIL. */
7273 static unsigned int
7274 encode_thumb32_immediate (unsigned int val)
7275 {
7276 unsigned int a, i;
7277
7278 if (val <= 0xff)
7279 return val;
7280
7281 for (i = 1; i <= 24; i++)
7282 {
7283 a = val >> i;
7284 if ((val & ~(0xff << i)) == 0)
7285 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7286 }
7287
7288 a = val & 0xff;
7289 if (val == ((a << 16) | a))
7290 return 0x100 | a;
7291 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7292 return 0x300 | a;
7293
7294 a = val & 0xff00;
7295 if (val == ((a << 16) | a))
7296 return 0x200 | (a >> 8);
7297
7298 return FAIL;
7299 }
/* Encode a VFP SP or DP register number into inst.instruction.

   REG is the register number; POS selects which instruction field
   (Sd/Sn/Sm or Dd/Dn/Dm) receives it.  D registers above 15 require
   the fpu_vfp_ext_d32 feature: when available its use is recorded in
   the appropriate arch-used feature set, otherwise an error is
   reported and the encoding is abandoned.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* An S register splits as {4-bit field = reg >> 1, extra bit = reg & 1};
     a D register splits as {4-bit field = reg & 15, extra bit = reg >> 4}.
     The shift counts below place each part in its instruction field.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7354
7355 /* Encode a <shift> in an ARM-format instruction. The immediate,
7356 if any, is handled by md_apply_fix. */
7357 static void
7358 encode_arm_shift (int i)
7359 {
7360 if (inst.operands[i].shift_kind == SHIFT_RRX)
7361 inst.instruction |= SHIFT_ROR << 5;
7362 else
7363 {
7364 inst.instruction |= inst.operands[i].shift_kind << 5;
7365 if (inst.operands[i].immisreg)
7366 {
7367 inst.instruction |= SHIFT_BY_REG;
7368 inst.instruction |= inst.operands[i].imm << 8;
7369 }
7370 else
7371 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7372 }
7373 }
7374
7375 static void
7376 encode_arm_shifter_operand (int i)
7377 {
7378 if (inst.operands[i].isreg)
7379 {
7380 inst.instruction |= inst.operands[i].reg;
7381 encode_arm_shift (i);
7382 }
7383 else
7384 {
7385 inst.instruction |= INST_IMMEDIATE;
7386 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7387 inst.instruction |= inst.operands[i].imm;
7388 }
7389 }
7390
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register (bits 19-16) and the pre/post-index and
   write-back bits of operand I, rejecting forms the instruction cannot
   take.  IS_T is true for the LDRT/STRT-style instructions, which only
   accept post-indexed addressing (see do_ldstt callers).  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For the T variants the W bit is set in addition to the implicit
	 write-back of post-indexing.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when a write-back (or post-indexed) base register, bits 19-16,
     is the same as the transfer register, bits 15-12.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7433
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      /* In mode 2 a *set* I bit selects the register-offset form —
	 yes, this is backwards relative to data-processing.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_warn (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7493
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register form at all.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7537
7538 /* Write immediate bits [7:0] to the following locations:
7539
7540 |28/24|23 19|18 16|15 4|3 0|
7541 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7542
7543 This function is used by VMOV/VMVN/VORR/VBIC. */
7544
7545 static void
7546 neon_write_immbits (unsigned immbits)
7547 {
7548 inst.instruction |= immbits & 0xf;
7549 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7550 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7551 }
7552
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32)
    lo = ~lo & 0xffffffff;
  else if (size == 64)
    {
      /* 64-bit inversion touches both halves.  */
      lo = ~lo & 0xffffffff;
      hi = ~hi & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7589
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. each byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned byte = (imm >> (i * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7601
/* For immediate of above form, return 0bABCD: bit 0 of each byte of
   IMM, collected into a 4-bit value (byte 0 -> bit 0, ... byte 3 ->
   bit 3).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int i;

  for (i = 0; i < 4; i++)
    result |= ((imm >> (i * 8)) & 1) << i;

  return result;
}
7610
/* Compress quarter-float representation to 0b...000 abcdefgh: the sign
   bit (bit 31 of IMM) lands in bit 7, bits 25:19 of IMM in bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_bits | sign_bit;
}
7618
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when IMMHI:IMMLO cannot be
   represented at any element size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf, 32-bit only, MOV only.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit value made of all-0x00/all-0xff bytes: cmode 0xe with the
	 op field forced to 1 (still a MOV, per the header comment).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise both words must match so the 32-bit cases below can
	 represent the value.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  /* 0x000000XY.  */
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  /* 0x0000XY00.  */
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  /* 0x00XY0000.  */
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  /* 0xXY000000.  */
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  /* 0x0000XYff ("ones" variant).  */
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  /* 0x00XYffff ("ones" variant).  */
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* The 16-bit cases require both halfwords to repeat.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  /* 0x00XY per halfword.  */
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  /* 0xXY00 per halfword.  */
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* The 8-bit case requires both bytes of the halfword to repeat.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7728
/* The kind of "=imm" literal load being handled; selects the encoding
   strategy used by move_or_literal_pool below.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb-state load/move.  */
  CONST_ARM,	/* ARM-state load/move.  */
  CONST_VEC	/* Vector (VFP/Neon) load — see encode_arm_cp_address.  */
};
7735
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);
  bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;

  /* Pick the load bit matching the instruction's width/state; the
     pseudo is only valid on load instructions.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if ((inst.reloc.exp.X_op == O_constant
       || inst.reloc.exp.X_op == O_big)
      && !inst.operands[i].issingle)
    {
      if (thumb_p && inst.reloc.exp.X_op == O_constant)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else if (arm_p && inst.reloc.exp.X_op == O_constant)
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
      else if (vec64_p)
	{
	  int op = 0;
	  unsigned immbits = 0;
	  unsigned immlo = inst.operands[1].imm;
	  /* High word: taken from the operand when regisimm, zero for
	     unsigned expressions, otherwise the sign-extension of the
	     low word.  */
	  unsigned immhi = inst.operands[1].regisimm
			   ? inst.operands[1].reg
			   : inst.reloc.exp.X_unsigned
			     ? 0
			     : ((int64_t)((int) immlo)) >> 32;
	  int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);

	  /* If the plain value cannot be encoded, retry with the
	     inverted value and the opposite op (VMOV <-> VMVN).  */
	  if (cmode == FAIL)
	    {
	      neon_invert_size (&immlo, &immhi, 64);
	      op = !op;
	      cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);
	    }
	  if (cmode != FAIL)
	    {
	      inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				 | (1 << 23)
				 | (cmode << 8)
				 | (op << 5)
				 | (1 << 4);
	      /* Fill other bits in vmov encoding for both thumb and arm.  */
	      if (thumb_mode)
		inst.instruction |= (0x7 << 29) | (0xF << 24);
	      else
		inst.instruction |= (0xF << 28) | (0x1 << 25);
	      neon_write_immbits (immbits);
	      return TRUE;
	    }
	}
    }

  /* No move was possible: reserve a literal-pool slot and rewrite the
     operand as a PC-relative, pre-indexed load from the pool.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7859
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL (with
   inst.error set).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand here is an "=imm" pseudo: hand it to
     move_or_literal_pool, which either turns the whole instruction
     into a move (SUCCESS) or rewrites operand 1 as a pool load and
     lets the encoding below proceed.  */
  if (!inst.operands[i].isreg)
    {
      gas_assert (inst.operands[0].isvec);
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; anything else becomes the default CP offset reloc.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
7931
7932 /* Functions for instruction encoding, sorted by sub-architecture.
7933 First some generics; their names are taken from the conventional
7934 bit positions for register arguments in ARM format instructions. */
7935
/* Encoder for instructions needing no operand fix-up beyond the base
   opcode already in inst.instruction.  */
static void
do_noargs (void)
{
}
7940
/* Encode a single Rd operand in bits 15-12.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
7946
7947 static void
7948 do_rd_rm (void)
7949 {
7950 inst.instruction |= inst.operands[0].reg << 12;
7951 inst.instruction |= inst.operands[1].reg;
7952 }
7953
7954 static void
7955 do_rm_rn (void)
7956 {
7957 inst.instruction |= inst.operands[0].reg;
7958 inst.instruction |= inst.operands[1].reg << 16;
7959 }
7960
7961 static void
7962 do_rd_rn (void)
7963 {
7964 inst.instruction |= inst.operands[0].reg << 12;
7965 inst.instruction |= inst.operands[1].reg << 16;
7966 }
7967
7968 static void
7969 do_rn_rd (void)
7970 {
7971 inst.instruction |= inst.operands[0].reg << 16;
7972 inst.instruction |= inst.operands[1].reg << 12;
7973 }
7974
7975 static bfd_boolean
7976 check_obsolete (const arm_feature_set *feature, const char *msg)
7977 {
7978 if (ARM_CPU_IS_ANY (cpu_variant))
7979 {
7980 as_warn ("%s", msg);
7981 return TRUE;
7982 }
7983 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7984 {
7985 as_bad ("%s", msg);
7986 return TRUE;
7987 }
7988
7989 return FALSE;
7990 }
7991
/* Encode Rd (bits 15-12), Rm (bits 3-0) and Rn (bits 19-16), applying
   the SWP/SWPB-specific operand restriction and obsolescence
   diagnostics when the opcode matches the SWP{B} pattern.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8015
8016 static void
8017 do_rd_rn_rm (void)
8018 {
8019 inst.instruction |= inst.operands[0].reg << 12;
8020 inst.instruction |= inst.operands[1].reg << 16;
8021 inst.instruction |= inst.operands[2].reg;
8022 }
8023
/* Encode Rm (bits 3-0), Rd (bits 15-12) and Rn (bits 19-16).  The Rn
   operand may not be PC, and any address expression attached to it
   must amount to a plain zero offset.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8036
/* Encode a single immediate operand ORed directly into the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8042
/* Encode Rd (bits 15-12) plus a coprocessor-style address operand
   (write-back and unindexed forms both permitted).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8049
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   wrap).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The -8 is the ".-8" in the comment above: it accounts for the
     offset between the instruction's address and the value PC reads
     as in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
8068
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Two instructions are emitted, per the expansion shown above.  */
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
}
8086
8087 static void
8088 do_arit (void)
8089 {
8090 if (!inst.operands[1].present)
8091 inst.operands[1].reg = inst.operands[0].reg;
8092 inst.instruction |= inst.operands[0].reg << 12;
8093 inst.instruction |= inst.operands[1].reg << 16;
8094 encode_arm_shifter_operand (2);
8095 }
8096
8097 static void
8098 do_barrier (void)
8099 {
8100 if (inst.operands[0].present)
8101 inst.instruction |= inst.operands[0].imm;
8102 else
8103 inst.instruction |= 0xf;
8104 }
8105
8106 static void
8107 do_bfc (void)
8108 {
8109 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8110 constraint (msb > 32, _("bit-field extends past end of register"));
8111 /* The instruction encoding stores the LSB and MSB,
8112 not the LSB and width. */
8113 inst.instruction |= inst.operands[0].reg << 12;
8114 inst.instruction |= inst.operands[1].imm << 7;
8115 inst.instruction |= (msb - 1) << 16;
8116 }
8117
8118 static void
8119 do_bfi (void)
8120 {
8121 unsigned int msb;
8122
8123 /* #0 in second position is alternative syntax for bfc, which is
8124 the same instruction but with REG_PC in the Rm field. */
8125 if (!inst.operands[1].isreg)
8126 inst.operands[1].reg = REG_PC;
8127
8128 msb = inst.operands[2].imm + inst.operands[3].imm;
8129 constraint (msb > 32, _("bit-field extends past end of register"));
8130 /* The instruction encoding stores the LSB and MSB,
8131 not the LSB and width. */
8132 inst.instruction |= inst.operands[0].reg << 12;
8133 inst.instruction |= inst.operands[1].reg;
8134 inst.instruction |= inst.operands[2].imm << 7;
8135 inst.instruction |= (msb - 1) << 16;
8136 }
8137
8138 static void
8139 do_bfx (void)
8140 {
8141 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8142 _("bit-field extends past end of register"));
8143 inst.instruction |= inst.operands[0].reg << 12;
8144 inst.instruction |= inst.operands[1].reg;
8145 inst.instruction |= inst.operands[2].imm << 7;
8146 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8147 }
8148
8149 /* ARM V5 breakpoint instruction (argument parse)
8150 BKPT <16 bit unsigned immediate>
8151 Instruction is not conditional.
8152 The bit pattern given in insns[] has the COND_ALWAYS condition,
8153 and it is an error if the caller tried to override that. */
8154
8155 static void
8156 do_bkpt (void)
8157 {
8158 /* Top 12 of 16 bits to bits 19:8. */
8159 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8160
8161 /* Bottom 4 of 16 bits to bits 3:0. */
8162 inst.instruction |= inst.operands[0].imm & 0xf;
8163 }
8164
/* Set up inst.reloc for a branch instruction.  DEFAULT_RELOC is used
   unless the branch-target operand carried an explicit '(plt)' or
   '(tlscall)' suffix, in which case the matching PLT or TLS-call reloc
   is chosen instead (the Thumb TLS-call flavour when assembling in
   Thumb state).  Branch relocs are always PC-relative.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8181
/* B{cond}.  For EABI v4 and later use BFD_RELOC_ARM_PCREL_JUMP
   (presumably so the linker can handle interworking fixups — see
   encode_branch); otherwise the plain branch reloc.  */

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8192
/* BL.  For EABI v4 and later an unconditional BL takes the CALL reloc
   and a conditional one the JUMP reloc (a conditional call cannot be
   rewritten to BLX by the linker); otherwise fall back to the plain
   branch reloc.  */

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8208
8209 /* ARM V5 branch-link-exchange instruction (argument parse)
8210 BLX <target_addr> ie BLX(1)
8211 BLX{<condition>} <Rm> ie BLX(2)
8212 Unfortunately, there are two different opcodes for this mnemonic.
8213 So, the insns[].value is not used, and the code here zaps values
8214 into inst.instruction.
8215 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8216
8217 static void
8218 do_blx (void)
8219 {
8220 if (inst.operands[0].isreg)
8221 {
8222 /* Arg is a register; the opcode provided by insns[] is correct.
8223 It is not illegal to do "blx pc", just useless. */
8224 if (inst.operands[0].reg == REG_PC)
8225 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8226
8227 inst.instruction |= inst.operands[0].reg;
8228 }
8229 else
8230 {
8231 /* Arg is an address; this instruction cannot be executed
8232 conditionally, and the opcode must be adjusted.
8233 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8234 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8235 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8236 inst.instruction = 0xfa000000;
8237 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8238 }
8239 }
8240
/* BX.  Decide whether an R_ARM_V4BX reloc is wanted so the linker can
   rewrite the instruction for pre-v5 interworking.  */

static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* On non-ELF targets the #if below vanishes and the reloc is
     suppressed unconditionally.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8264
8265
8266 /* ARM v5TEJ. Jump to Jazelle code. */
8267
8268 static void
8269 do_bxj (void)
8270 {
8271 if (inst.operands[0].reg == REG_PC)
8272 as_tsktsk (_("use of r15 in bxj is not really useful"));
8273
8274 inst.instruction |= inst.operands[0].reg;
8275 }
8276
8277 /* Co-processor data operation:
8278 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8279 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8280 static void
8281 do_cdp (void)
8282 {
8283 inst.instruction |= inst.operands[0].reg << 8;
8284 inst.instruction |= inst.operands[1].imm << 20;
8285 inst.instruction |= inst.operands[2].reg << 12;
8286 inst.instruction |= inst.operands[3].reg << 16;
8287 inst.instruction |= inst.operands[4].reg;
8288 inst.instruction |= inst.operands[5].imm << 5;
8289 }
8290
8291 static void
8292 do_cmp (void)
8293 {
8294 inst.instruction |= inst.operands[0].reg << 16;
8295 encode_arm_shifter_operand (1);
8296 }
8297
8298 /* Transfer between coprocessor and ARM registers.
8299 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8300 MRC2
8301 MCR{cond}
8302 MCR2
8303
8304 No special properties. */
8305
/* One entry in the table of coprocessor registers whose access is
   deprecated or obsoleted from some architecture version onwards.  */

struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the MCR/MRC encoding.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Warning text for deprecated access.  */
  const char *obs_msg;		/* Warning text for obsoleted access.  */
};
8318
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 5, 4,					/* CP15ISB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1, 0, 0,					/* TEEHBR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0, 0, 0,					/* TEECR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8346
/* Encode MRC/MRC2/MCR/MCR2 after validating the transfer register and
   warning about accesses to deprecated coprocessor registers.  */

static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* The literal opcodes identify which mnemonic this is.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches a deprecated-register table entry
     (skipped when assembling for the "any" CPU).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_warn ("%s", r->dep_msg);
	}
    }

  /* coproc in bits 8-11, opc1 in 21-23, Rt in 12-15, CRn in 16-19,
     CRm in 0-3, opc2 in 5-7.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8396
8397 /* Transfer between coprocessor register and pair of ARM registers.
8398 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8399 MCRR2
8400 MRRC{cond}
8401 MRRC2
8402
8403 Two XScale instructions are special cases of these:
8404
8405 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8406 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8407
8408 Result unpredictable if Rd or Rn is R15. */
8409
8410 static void
8411 do_co_reg2c (void)
8412 {
8413 unsigned Rd, Rn;
8414
8415 Rd = inst.operands[2].reg;
8416 Rn = inst.operands[3].reg;
8417
8418 if (thumb_mode)
8419 {
8420 reject_bad_reg (Rd);
8421 reject_bad_reg (Rn);
8422 }
8423 else
8424 {
8425 constraint (Rd == REG_PC, BAD_PC);
8426 constraint (Rn == REG_PC, BAD_PC);
8427 }
8428
8429 inst.instruction |= inst.operands[0].reg << 8;
8430 inst.instruction |= inst.operands[1].imm << 4;
8431 inst.instruction |= Rd << 12;
8432 inst.instruction |= Rn << 16;
8433 inst.instruction |= inst.operands[4].reg;
8434 }
8435
8436 static void
8437 do_cpsi (void)
8438 {
8439 inst.instruction |= inst.operands[0].imm << 6;
8440 if (inst.operands[1].present)
8441 {
8442 inst.instruction |= CPSI_MMOD;
8443 inst.instruction |= inst.operands[1].imm;
8444 }
8445 }
8446
8447 static void
8448 do_dbg (void)
8449 {
8450 inst.instruction |= inst.operands[0].imm;
8451 }
8452
8453 static void
8454 do_div (void)
8455 {
8456 unsigned Rd, Rn, Rm;
8457
8458 Rd = inst.operands[0].reg;
8459 Rn = (inst.operands[1].present
8460 ? inst.operands[1].reg : Rd);
8461 Rm = inst.operands[2].reg;
8462
8463 constraint ((Rd == REG_PC), BAD_PC);
8464 constraint ((Rn == REG_PC), BAD_PC);
8465 constraint ((Rm == REG_PC), BAD_PC);
8466
8467 inst.instruction |= Rd << 16;
8468 inst.instruction |= Rn << 0;
8469 inst.instruction |= Rm << 8;
8470 }
8471
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing: the instruction occupies no space in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      /* Record the IT condition and mask so following instructions
	 can be checked against the IT block state.  */
      set_it_insn_type (IT_INSN);
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8488
/* If there is only one register in the register list RANGE, return its
   register number.  Otherwise return -1.  An empty list also returns
   -1: previously that case computed ffs(0)-1 == -1 and then evaluated
   1 << -1, which is undefined behaviour in C.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* No bit set, or lowest set bit above r15: not a single-register
     list.  */
  if (i < 0 || i > 15)
    return -1;
  /* Exactly one bit set?  */
  return range == (1 << i) ? i : -1;
}
8497
/* Shared encoder for LDM/STM and for PUSH/POP (which reach here after
   do_push_pop synthesizes the SP base operand).  Diagnoses the
   UNPREDICTABLE base-writeback cases, and for a single-register
   PUSH/POP switches to the A2 single-word-transfer encoding.  */

static void
encode_ldmstm (int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the type 2/3 forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8548
8549 static void
8550 do_ldmstm (void)
8551 {
8552 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8553 }
8554
8555 /* ARMv5TE load-consecutive (argument parse)
8556 Mode is like LDRH.
8557
8558 LDRccD R, mode
8559 STRccD R, mode. */
8560
/* LDRD/STRD: the first transfer register must be even and not r14; the
   second, when supplied, must be the next register up (it defaults to
   that when omitted).  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8596
/* LDREX: only the plain register-indirect form with a zero offset is
   a legal address.  */

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset has been consumed; do not emit a fixup for it.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8628
8629 static void
8630 do_ldrexd (void)
8631 {
8632 constraint (inst.operands[0].reg % 2 != 0,
8633 _("even register required"));
8634 constraint (inst.operands[1].present
8635 && inst.operands[1].reg != inst.operands[0].reg + 1,
8636 _("can only load two consecutive registers"));
8637 /* If op 1 were present and equal to PC, this function wouldn't
8638 have been called in the first place. */
8639 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8640
8641 inst.instruction |= inst.operands[0].reg << 12;
8642 inst.instruction |= inst.operands[2].reg << 16;
8643 }
8644
8645 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8646 which is not a multiple of four is UNPREDICTABLE. */
8647 static void
8648 check_ldr_r15_aligned (void)
8649 {
8650 constraint (!(inst.operands[1].immisreg)
8651 && (inst.operands[0].reg == REG_PC
8652 && inst.operands[1].reg == REG_PC
8653 && (inst.reloc.exp.X_add_number & 0x3)),
8654 _("ldr to register 15 must be 4-byte alligned"));
8655 }
8656
8657 static void
8658 do_ldst (void)
8659 {
8660 inst.instruction |= inst.operands[0].reg << 12;
8661 if (!inst.operands[1].isreg)
8662 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8663 return;
8664 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8665 check_ldr_r15_aligned ();
8666 }
8667
8668 static void
8669 do_ldstt (void)
8670 {
8671 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8672 reject [Rn,...]. */
8673 if (inst.operands[1].preind)
8674 {
8675 constraint (inst.reloc.exp.X_op != O_constant
8676 || inst.reloc.exp.X_add_number != 0,
8677 _("this instruction requires a post-indexed address"));
8678
8679 inst.operands[1].preind = 0;
8680 inst.operands[1].postind = 1;
8681 inst.operands[1].writeback = 1;
8682 }
8683 inst.instruction |= inst.operands[0].reg << 12;
8684 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8685 }
8686
8687 /* Halfword and signed-byte load/store operations. */
8688
8689 static void
8690 do_ldstv4 (void)
8691 {
8692 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8693 inst.instruction |= inst.operands[0].reg << 12;
8694 if (!inst.operands[1].isreg)
8695 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8696 return;
8697 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8698 }
8699
8700 static void
8701 do_ldsttv4 (void)
8702 {
8703 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8704 reject [Rn,...]. */
8705 if (inst.operands[1].preind)
8706 {
8707 constraint (inst.reloc.exp.X_op != O_constant
8708 || inst.reloc.exp.X_add_number != 0,
8709 _("this instruction requires a post-indexed address"));
8710
8711 inst.operands[1].preind = 0;
8712 inst.operands[1].postind = 1;
8713 inst.operands[1].writeback = 1;
8714 }
8715 inst.instruction |= inst.operands[0].reg << 12;
8716 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8717 }
8718
8719 /* Co-processor register load/store.
8720 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8721 static void
8722 do_lstc (void)
8723 {
8724 inst.instruction |= inst.operands[0].reg << 8;
8725 inst.instruction |= inst.operands[1].reg << 12;
8726 encode_arm_cp_address (2, TRUE, TRUE, 0);
8727 }
8728
8729 static void
8730 do_mlas (void)
8731 {
8732 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8733 if (inst.operands[0].reg == inst.operands[1].reg
8734 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8735 && !(inst.instruction & 0x00400000))
8736 as_tsktsk (_("Rd and Rm should be different in mla"));
8737
8738 inst.instruction |= inst.operands[0].reg << 16;
8739 inst.instruction |= inst.operands[1].reg;
8740 inst.instruction |= inst.operands[2].reg << 8;
8741 inst.instruction |= inst.operands[3].reg << 12;
8742 }
8743
8744 static void
8745 do_mov (void)
8746 {
8747 inst.instruction |= inst.operands[0].reg << 12;
8748 encode_arm_shifter_operand (1);
8749 }
8750
8751 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8752 static void
8753 do_mov16 (void)
8754 {
8755 bfd_vma imm;
8756 bfd_boolean top;
8757
8758 top = (inst.instruction & 0x00400000) != 0;
8759 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8760 _(":lower16: not allowed this instruction"));
8761 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8762 _(":upper16: not allowed instruction"));
8763 inst.instruction |= inst.operands[0].reg << 12;
8764 if (inst.reloc.type == BFD_RELOC_UNUSED)
8765 {
8766 imm = inst.reloc.exp.X_add_number;
8767 /* The value is in two pieces: 0:11, 16:19. */
8768 inst.instruction |= (imm & 0x00000fff);
8769 inst.instruction |= (imm & 0x0000f000) << 4;
8770 }
8771 }
8772
8773 static void do_vfp_nsyn_opcode (const char *);
8774
8775 static int
8776 do_vfp_nsyn_mrs (void)
8777 {
8778 if (inst.operands[0].isvec)
8779 {
8780 if (inst.operands[1].reg != 1)
8781 first_error (_("operand 1 must be FPSCR"));
8782 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8783 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8784 do_vfp_nsyn_opcode ("fmstat");
8785 }
8786 else if (inst.operands[1].isvec)
8787 do_vfp_nsyn_opcode ("fmrx");
8788 else
8789 return FAIL;
8790
8791 return SUCCESS;
8792 }
8793
8794 static int
8795 do_vfp_nsyn_msr (void)
8796 {
8797 if (inst.operands[0].isvec)
8798 do_vfp_nsyn_opcode ("fmxr");
8799 else
8800 return FAIL;
8801
8802 return SUCCESS;
8803 }
8804
8805 static void
8806 do_vmrs (void)
8807 {
8808 unsigned Rt = inst.operands[0].reg;
8809
8810 if (thumb_mode && Rt == REG_SP)
8811 {
8812 inst.error = BAD_SP;
8813 return;
8814 }
8815
8816 /* APSR_ sets isvec. All other refs to PC are illegal. */
8817 if (!inst.operands[0].isvec && Rt == REG_PC)
8818 {
8819 inst.error = BAD_PC;
8820 return;
8821 }
8822
8823 /* If we get through parsing the register name, we just insert the number
8824 generated into the instruction without further validation. */
8825 inst.instruction |= (inst.operands[1].reg << 16);
8826 inst.instruction |= (Rt << 12);
8827 }
8828
8829 static void
8830 do_vmsr (void)
8831 {
8832 unsigned Rt = inst.operands[1].reg;
8833
8834 if (thumb_mode)
8835 reject_bad_reg (Rt);
8836 else if (Rt == REG_PC)
8837 {
8838 inst.error = BAD_PC;
8839 return;
8840 }
8841
8842 /* If we get through parsing the register name, we just insert the number
8843 generated into the instruction without further validation. */
8844 inst.instruction |= (inst.operands[0].reg << 16);
8845 inst.instruction |= (Rt << 12);
8846 }
8847
static void
do_mrs (void)
{
  unsigned br;

  /* Dispatch the VFP-sysreg spellings (vmrs aliases) first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) can never equal 0xf000 (masks
	 bits 16-19, compares against bits 12-15), so the second test
	 is always true — possibly the mask or constant is off by a
	 nibble; verify against the banked-register parse values.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
8876
8877 /* Two possible forms:
8878 "{C|S}PSR_<field>, Rm",
8879 "{C|S}PSR_f, #expression". */
8880
8881 static void
8882 do_msr (void)
8883 {
8884 if (do_vfp_nsyn_msr () == SUCCESS)
8885 return;
8886
8887 inst.instruction |= inst.operands[0].imm;
8888 if (inst.operands[1].isreg)
8889 inst.instruction |= inst.operands[1].reg;
8890 else
8891 {
8892 inst.instruction |= INST_IMMEDIATE;
8893 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8894 inst.reloc.pc_rel = 0;
8895 }
8896 }
8897
8898 static void
8899 do_mul (void)
8900 {
8901 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8902
8903 if (!inst.operands[2].present)
8904 inst.operands[2].reg = inst.operands[0].reg;
8905 inst.instruction |= inst.operands[0].reg << 16;
8906 inst.instruction |= inst.operands[1].reg;
8907 inst.instruction |= inst.operands[2].reg << 8;
8908
8909 if (inst.operands[0].reg == inst.operands[1].reg
8910 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8911 as_tsktsk (_("Rd and Rm should be different in mul"));
8912 }
8913
8914 /* Long Multiply Parser
8915 UMULL RdLo, RdHi, Rm, Rs
8916 SMULL RdLo, RdHi, Rm, Rs
8917 UMLAL RdLo, RdHi, Rm, Rs
8918 SMLAL RdLo, RdHi, Rm, Rs. */
8919
8920 static void
8921 do_mull (void)
8922 {
8923 inst.instruction |= inst.operands[0].reg << 12;
8924 inst.instruction |= inst.operands[1].reg << 16;
8925 inst.instruction |= inst.operands[2].reg;
8926 inst.instruction |= inst.operands[3].reg << 8;
8927
8928 /* rdhi and rdlo must be different. */
8929 if (inst.operands[0].reg == inst.operands[1].reg)
8930 as_tsktsk (_("rdhi and rdlo must be different"));
8931
8932 /* rdhi, rdlo and rm must all be different before armv6. */
8933 if ((inst.operands[0].reg == inst.operands[2].reg
8934 || inst.operands[1].reg == inst.operands[2].reg)
8935 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8936 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8937 }
8938
8939 static void
8940 do_nop (void)
8941 {
8942 if (inst.operands[0].present
8943 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8944 {
8945 /* Architectural NOP hints are CPSR sets with no bits selected. */
8946 inst.instruction &= 0xf0000000;
8947 inst.instruction |= 0x0320f000;
8948 if (inst.operands[0].present)
8949 inst.instruction |= inst.operands[0].imm;
8950 }
8951 }
8952
8953 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8954 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8955 Condition defaults to COND_ALWAYS.
8956 Error if Rd, Rn or Rm are R15. */
8957
8958 static void
8959 do_pkhbt (void)
8960 {
8961 inst.instruction |= inst.operands[0].reg << 12;
8962 inst.instruction |= inst.operands[1].reg << 16;
8963 inst.instruction |= inst.operands[2].reg;
8964 if (inst.operands[3].present)
8965 encode_arm_shift (3);
8966 }
8967
8968 /* ARM V6 PKHTB (Argument Parse). */
8969
8970 static void
8971 do_pkhtb (void)
8972 {
8973 if (!inst.operands[3].present)
8974 {
8975 /* If the shift specifier is omitted, turn the instruction
8976 into pkhbt rd, rm, rn. */
8977 inst.instruction &= 0xfff00010;
8978 inst.instruction |= inst.operands[0].reg << 12;
8979 inst.instruction |= inst.operands[1].reg;
8980 inst.instruction |= inst.operands[2].reg << 16;
8981 }
8982 else
8983 {
8984 inst.instruction |= inst.operands[0].reg << 12;
8985 inst.instruction |= inst.operands[1].reg << 16;
8986 inst.instruction |= inst.operands[2].reg;
8987 encode_arm_shift (3);
8988 }
8989 }
8990
8991 /* ARMv5TE: Preload-Cache
8992 MP Extensions: Preload for write
8993
8994 PLD(W) <addr_mode>
8995
8996 Syntactically, like LDR with B=1, W=0, L=1. */
8997
/* PLD accepts only a plain pre-indexed address: no post-index, no
   writeback.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9011
9012 /* ARMv7: PLI <addr_mode> */
/* PLI: same addressing restrictions as PLD, but the P bit is clear in
   the final encoding.  */

static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9027
9028 static void
9029 do_push_pop (void)
9030 {
9031 inst.operands[1] = inst.operands[0];
9032 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9033 inst.operands[0].isreg = 1;
9034 inst.operands[0].writeback = 1;
9035 inst.operands[0].reg = REG_SP;
9036 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9037 }
9038
9039 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9040 word at the specified address and the following word
9041 respectively.
9042 Unconditionally executed.
9043 Error if Rn is R15. */
9044
9045 static void
9046 do_rfe (void)
9047 {
9048 inst.instruction |= inst.operands[0].reg << 16;
9049 if (inst.operands[0].writeback)
9050 inst.instruction |= WRITE_BACK;
9051 }
9052
9053 /* ARM V6 ssat (argument parse). */
9054
9055 static void
9056 do_ssat (void)
9057 {
9058 inst.instruction |= inst.operands[0].reg << 12;
9059 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9060 inst.instruction |= inst.operands[2].reg;
9061
9062 if (inst.operands[3].present)
9063 encode_arm_shift (3);
9064 }
9065
9066 /* ARM V6 usat (argument parse). */
9067
9068 static void
9069 do_usat (void)
9070 {
9071 inst.instruction |= inst.operands[0].reg << 12;
9072 inst.instruction |= inst.operands[1].imm << 16;
9073 inst.instruction |= inst.operands[2].reg;
9074
9075 if (inst.operands[3].present)
9076 encode_arm_shift (3);
9077 }
9078
9079 /* ARM V6 ssat16 (argument parse). */
9080
9081 static void
9082 do_ssat16 (void)
9083 {
9084 inst.instruction |= inst.operands[0].reg << 12;
9085 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9086 inst.instruction |= inst.operands[2].reg;
9087 }
9088
9089 static void
9090 do_usat16 (void)
9091 {
9092 inst.instruction |= inst.operands[0].reg << 12;
9093 inst.instruction |= inst.operands[1].imm << 16;
9094 inst.instruction |= inst.operands[2].reg;
9095 }
9096
9097 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9098 preserving the other bits.
9099
9100 setend <endian_specifier>, where <endian_specifier> is either
9101 BE or LE. */
9102
9103 static void
9104 do_setend (void)
9105 {
9106 if (warn_on_deprecated
9107 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9108 as_warn (_("setend use is deprecated for ARMv8"));
9109
9110 if (inst.operands[0].imm)
9111 inst.instruction |= 0x200;
9112 }
9113
9114 static void
9115 do_shift (void)
9116 {
9117 unsigned int Rm = (inst.operands[1].present
9118 ? inst.operands[1].reg
9119 : inst.operands[0].reg);
9120
9121 inst.instruction |= inst.operands[0].reg << 12;
9122 inst.instruction |= Rm;
9123 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9124 {
9125 inst.instruction |= inst.operands[2].reg << 8;
9126 inst.instruction |= SHIFT_BY_REG;
9127 /* PR 12854: Error on extraneous shifts. */
9128 constraint (inst.operands[2].shifted,
9129 _("extraneous shift as part of operand to shift insn"));
9130 }
9131 else
9132 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9133 }
9134
9135 static void
9136 do_smc (void)
9137 {
9138 inst.reloc.type = BFD_RELOC_ARM_SMC;
9139 inst.reloc.pc_rel = 0;
9140 }
9141
9142 static void
9143 do_hvc (void)
9144 {
9145 inst.reloc.type = BFD_RELOC_ARM_HVC;
9146 inst.reloc.pc_rel = 0;
9147 }
9148
static void
do_swi (void)
{
  /* SWI/SVC: the comment field is applied by a non-PC-relative fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9155
9156 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9157 SMLAxy{cond} Rd,Rm,Rs,Rn
9158 SMLAWy{cond} Rd,Rm,Rs,Rn
9159 Error if any register is R15. */
9160
9161 static void
9162 do_smla (void)
9163 {
9164 inst.instruction |= inst.operands[0].reg << 16;
9165 inst.instruction |= inst.operands[1].reg;
9166 inst.instruction |= inst.operands[2].reg << 8;
9167 inst.instruction |= inst.operands[3].reg << 12;
9168 }
9169
9170 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9171 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9172 Error if any register is R15.
9173 Warning if Rdlo == Rdhi. */
9174
9175 static void
9176 do_smlal (void)
9177 {
9178 inst.instruction |= inst.operands[0].reg << 12;
9179 inst.instruction |= inst.operands[1].reg << 16;
9180 inst.instruction |= inst.operands[2].reg;
9181 inst.instruction |= inst.operands[3].reg << 8;
9182
9183 if (inst.operands[0].reg == inst.operands[1].reg)
9184 as_tsktsk (_("rdhi and rdlo must be different"));
9185 }
9186
9187 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9188 SMULxy{cond} Rd,Rm,Rs
9189 Error if any register is R15. */
9190
static void
do_smul (void)
{
  /* SMULxy Rd, Rm, Rs: Rd -> bits 16-19, Rm -> 0-3, Rs -> 8-11.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9198
9199 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9200 the same for both ARM and Thumb-2. */
9201
9202 static void
9203 do_srs (void)
9204 {
9205 int reg;
9206
9207 if (inst.operands[0].present)
9208 {
9209 reg = inst.operands[0].reg;
9210 constraint (reg != REG_SP, _("SRS base register must be r13"));
9211 }
9212 else
9213 reg = REG_SP;
9214
9215 inst.instruction |= reg << 16;
9216 inst.instruction |= inst.operands[1].imm;
9217 if (inst.operands[0].writeback || inst.operands[1].writeback)
9218 inst.instruction |= WRITE_BACK;
9219 }
9220
9221 /* ARM V6 strex (argument parse). */
9222
static void
do_strex (void)
{
  /* Only a plain [Rn] address (no index, shift, writeback or
     post-increment) is legal, and Rn must not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* The ARM encoding has no offset field, so any offset must be zero.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd -> bits 12-15, Rt -> 0-3, Rn -> 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9246
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: only a bare [Rn] address is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9261
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be an even register and Rt2,
     when written, must be Rt + 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap Rt, Rt2 (implicitly Rt + 1) or the base Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd -> bits 12-15, Rt -> 0-3, Rn (operand 3) -> 16-19.  Rt2 is
     implicit in the encoding.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9283
9284 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* ARMv8 STLEX (ARM encoding): status register must not overlap the
     data or base registers; field layout is Rd, Rm, Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9293
static void
do_t_stlex (void)
{
  /* Thumb STLEX: same overlap restriction, but Thumb field order
     (Rm, Rd, Rn).  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9302
9303 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9304 extends it to 32-bits, and adds the result to a value in another
9305 register. You can specify a rotation by 0, 8, 16, or 24 bits
9306 before extracting the 16-bit value.
9307 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9308 Condition defaults to COND_ALWAYS.
9309 Error if any register uses R15. */
9310
9311 static void
9312 do_sxtah (void)
9313 {
9314 inst.instruction |= inst.operands[0].reg << 12;
9315 inst.instruction |= inst.operands[1].reg << 16;
9316 inst.instruction |= inst.operands[2].reg;
9317 inst.instruction |= inst.operands[3].imm << 10;
9318 }
9319
9320 /* ARM V6 SXTH.
9321
9322 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9323 Condition defaults to COND_ALWAYS.
9324 Error if any register uses R15. */
9325
static void
do_sxth (void)
{
  /* SXTH Rd, Rm {, <rotation>}: Rd -> bits 12-15, Rm -> 0-3,
     rotation field -> bits 10-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
9333 \f
9334 /* VFP instructions. In a logical order: SP variant first, monad
9335 before dyad, arithmetic then move then load/store. */
9336
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision one-operand op: Sd from operand 0, Sm from operand 1.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9343
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision two-operand op: Sd, Sn, Sm in operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9351
static void
do_vfp_sp_compare_z (void)
{
  /* Compare-with-zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9357
static void
do_vfp_dp_sp_cvt (void)
{
  /* Single -> double conversion: double Dd destination, single Sm source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9364
static void
do_vfp_sp_dp_cvt (void)
{
  /* Double -> single conversion: single Sd destination, double Dm source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9371
static void
do_vfp_reg_from_sp (void)
{
  /* Move from single-precision register to core register (e.g. FMRS):
     core Rd -> bits 12-15, source Sn in the VFP field.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9378
9379 static void
9380 do_vfp_reg2_from_sp2 (void)
9381 {
9382 constraint (inst.operands[2].imm != 2,
9383 _("only two consecutive VFP SP registers allowed here"));
9384 inst.instruction |= inst.operands[0].reg << 12;
9385 inst.instruction |= inst.operands[1].reg << 16;
9386 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9387 }
9388
static void
do_vfp_sp_from_reg (void)
{
  /* Move from core register to single-precision register (e.g. FMSR):
     destination Sn in the VFP field, core Rd -> bits 12-15.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9395
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Two core registers into a consecutive SP register pair; the pair
     length is recorded in operand 0's imm.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9405
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor-style address
     (operand 1), offset to be resolved by a fixup.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9412
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor-style address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9419
9420
9421 static void
9422 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9423 {
9424 if (inst.operands[0].writeback)
9425 inst.instruction |= WRITE_BACK;
9426 else
9427 constraint (ldstm_type != VFP_LDSTMIA,
9428 _("this addressing mode requires base-register writeback"));
9429 inst.instruction |= inst.operands[0].reg << 16;
9430 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9431 inst.instruction |= inst.operands[1].imm;
9432 }
9433
9434 static void
9435 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9436 {
9437 int count;
9438
9439 if (inst.operands[0].writeback)
9440 inst.instruction |= WRITE_BACK;
9441 else
9442 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9443 _("this addressing mode requires base-register writeback"));
9444
9445 inst.instruction |= inst.operands[0].reg << 16;
9446 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9447
9448 count = inst.operands[1].imm << 1;
9449 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9450 count += 1;
9451
9452 inst.instruction |= count;
9453 }
9454
static void
do_vfp_sp_ldstmia (void)
{
  /* FLDMIAS/FSTMIAS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9460
static void
do_vfp_sp_ldstmdb (void)
{
  /* FLDMDBS/FSTMDBS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9466
static void
do_vfp_dp_ldstmia (void)
{
  /* FLDMIAD/FSTMIAD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9472
static void
do_vfp_dp_ldstmdb (void)
{
  /* FLDMDBD/FSTMDBD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9478
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX ("unknown precision") wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9484
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX ("unknown precision") wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9490
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision op with Dd destination and Dm source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9497
static void
do_vfp_dp_rn_rd (void)
{
  /* Double-precision op taking operand 0 as Dn, operand 1 as Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9504
static void
do_vfp_dp_rd_rn (void)
{
  /* Double-precision op taking operand 0 as Dd, operand 1 as Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9511
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision three-operand op: Dd, Dn, Dm in operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9519
static void
do_vfp_dp_rd (void)
{
  /* Double-precision op with only a Dd field.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9525
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Double-precision op taking operands in Dm, Dd, Dn order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9533
9534 /* VFPv3 instructions. */
static void
do_vfp_sp_const (void)
{
  /* VFPv3 single-precision immediate: the 8-bit encoded constant is
     split, high nibble -> bits 16-19, low nibble -> bits 0-3.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9542
static void
do_vfp_dp_const (void)
{
  /* VFPv3 double-precision immediate: same split-nibble layout as the
     single-precision form, with a Dd destination.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9550
9551 static void
9552 vfp_conv (int srcsize)
9553 {
9554 int immbits = srcsize - inst.operands[1].imm;
9555
9556 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9557 {
9558 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9559 i.e. immbits must be in range 0 - 16. */
9560 inst.error = _("immediate value out of range, expected range [0, 16]");
9561 return;
9562 }
9563 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9564 {
9565 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9566 i.e. immbits must be in range 0 - 31. */
9567 inst.error = _("immediate value out of range, expected range [1, 32]");
9568 return;
9569 }
9570
9571 inst.instruction |= (immbits & 1) << 5;
9572 inst.instruction |= (immbits >> 1);
9573 }
9574
static void
do_vfp_sp_conv_16 (void)
{
  /* 16-bit fixed-point <-> single conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9581
static void
do_vfp_dp_conv_16 (void)
{
  /* 16-bit fixed-point <-> double conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9588
static void
do_vfp_sp_conv_32 (void)
{
  /* 32-bit fixed-point <-> single conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9595
static void
do_vfp_dp_conv_32 (void)
{
  /* 32-bit fixed-point <-> double conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9602 \f
9603 /* FPA instructions. Also in a logical order. */
9604
static void
do_fpa_cmp (void)
{
  /* FPA compare: operand 0 -> bits 16-19, operand 1 -> bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9611
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM: register count 1-4 is encoded in the CP_T_X/CP_T_Y
     bits (count 4 encodes as both bits clear).  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	    break;
    case 2: inst.instruction |= CP_T_Y;	    break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 				    break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesize the offset: each FPA register transfer is 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Convert a descending-stack access with writeback into a
	 post-indexed form.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9650 \f
9651 /* iWMMXt instructions: strictly in alphabetical order. */
9652
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC write only the flags; the destination must be r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9658
static void
do_iwmmxt_textrc (void)
{
  /* TEXTRC: register -> bits 12-15, immediate -> low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
9665
static void
do_iwmmxt_textrm (void)
{
  /* TEXTRM Rd, wRn, #imm: Rd -> bits 12-15, wRn -> 16-19, imm -> low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
9673
static void
do_iwmmxt_tinsr (void)
{
  /* TINSR wRd, Rn, #imm: wRd -> bits 16-19, Rn -> 12-15, imm -> low bits.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9681
static void
do_iwmmxt_tmia (void)
{
  /* TMIA wRd, Rm, Rs: accumulator -> bits 5+, Rm -> 0-3, Rs -> 12-15.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9689
static void
do_iwmmxt_waligni (void)
{
  /* WALIGNI wRd, wRn, wRm, #imm: the alignment immediate sits in
     bits 20+.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
9698
static void
do_iwmmxt_wmerge (void)
{
  /* WMERGE wRd, wRn, wRm, #imm: the merge immediate sits in bits 21+.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
9707
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9716
9717 static void
9718 do_iwmmxt_wldstbh (void)
9719 {
9720 int reloc;
9721 inst.instruction |= inst.operands[0].reg << 12;
9722 if (thumb_mode)
9723 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9724 else
9725 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9726 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9727 }
9728
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition field).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9742
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD.  On iWMMXt2 a register-offset addressing form exists;
     it is a different (unconditional) encoding, so the instruction word
     is rewritten field by field here.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the immediate-form fields, then build the register-offset
	 form: unconditional, with P/U/W bits from the parsed address.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9765
static void
do_iwmmxt_wshufh (void)
{
  /* WSHUFH: the 8-bit shuffle immediate is split, high nibble ->
     bits 20-23, low nibble -> bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
9774
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
9783
9784 static void
9785 do_iwmmxt_wrwrwr_or_imm5 (void)
9786 {
9787 if (inst.operands[2].isreg)
9788 do_rd_rn_rm ();
9789 else {
9790 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9791 _("immediate operand requires iWMMXt2"));
9792 do_rd_rn ();
9793 if (inst.operands[2].imm == 0)
9794 {
9795 switch ((inst.instruction >> 20) & 0xf)
9796 {
9797 case 4:
9798 case 5:
9799 case 6:
9800 case 7:
9801 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9802 inst.operands[2].imm = 16;
9803 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9804 break;
9805 case 8:
9806 case 9:
9807 case 10:
9808 case 11:
9809 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9810 inst.operands[2].imm = 32;
9811 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9812 break;
9813 case 12:
9814 case 13:
9815 case 14:
9816 case 15:
9817 {
9818 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9819 unsigned long wrn;
9820 wrn = (inst.instruction >> 16) & 0xf;
9821 inst.instruction &= 0xff0fff0f;
9822 inst.instruction |= wrn;
9823 /* Bail out here; the instruction is now assembled. */
9824 return;
9825 }
9826 }
9827 }
9828 /* Map 32 -> 0, etc. */
9829 inst.operands[2].imm &= 0x1f;
9830 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9831 }
9832 }
9833 \f
9834 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9835 operations first, then control, shift, and load/store. */
9836
9837 /* Insns like "foo X,Y,Z". */
9838
static void
do_mav_triple (void)
{
  /* Maverick three-register op: X -> bits 16-19, Y -> 0-3, Z -> 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9846
9847 /* Insns like "foo W,X,Y,Z".
9848 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9849
static void
do_mav_quad (void)
{
  /* Maverick four-register op: accumulator W -> bits 5+, X -> 12-15,
     Y -> 16-19, Z -> 0-3.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
9858
9859 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* cfmvsc32: DSPSC is implicit; only the MVDX source is encoded.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
9865
9866 /* Maverick shift immediate instructions.
9867 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9868 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9869
9870 static void
9871 do_mav_shift (void)
9872 {
9873 int imm = inst.operands[2].imm;
9874
9875 inst.instruction |= inst.operands[0].reg << 12;
9876 inst.instruction |= inst.operands[1].reg << 16;
9877
9878 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9879 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9880 Bit 4 should be 0. */
9881 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9882
9883 inst.instruction |= imm;
9884 }
9885 \f
9886 /* XScale instructions. Also sorted arithmetic before move. */
9887
9888 /* Xscale multiply-accumulate (argument parse)
9889 MIAcc acc0,Rm,Rs
9890 MIAPHcc acc0,Rm,Rs
9891 MIAxycc acc0,Rm,Rs. */
9892
static void
do_xsc_mia (void)
{
  /* MIA acc0, Rm, Rs: acc0 is implicit; Rm -> bits 0-3, Rs -> 12-15.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9899
9900 /* Xscale move-accumulator-register (argument parse)
9901
9902 MARcc acc0,RdLo,RdHi. */
9903
static void
do_xsc_mar (void)
{
  /* MAR acc0, RdLo, RdHi: RdLo -> bits 12-15, RdHi -> 16-19.  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9910
9911 /* Xscale move-register-accumulator (argument parse)
9912
9913 MRAcc RdLo,RdHi,acc0. */
9914
static void
do_xsc_mra (void)
{
  /* MRA RdLo, RdHi, acc0: the two destinations must differ.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
9922 \f
9923 /* Encoding functions relevant only to Thumb. */
9924
9925 /* inst.operands[i] is a shifted-register operand; encode
9926 it into inst.instruction in the format used by Thumb32. */
9927
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing operands cannot take a register-specified
     shift amount.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift count.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* 32 is representable only for LSR/ASR (encoded as 0).  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
			      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Split the count: bits 2-4 -> imm3 (insn bits 12-14),
	 bits 0-1 -> imm2 (insn bits 6-7).  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
9959
9960
9961 /* inst.operands[i] was set up by parse_address. Encode it into a
9962 Thumb32 format load or store instruction. Reject forms that cannot
9963 be used with such instructions. If is_t is true, reject forms that
9964 cannot be used with a T instruction; if is_d is true, reject forms
9965 that cannot be used with a D instruction. If it is a store insn,
9966 reject PC in Rn. */
9967
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* LSL amount is limited to 0-3; it lands in bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #off] or [Rn, #off]! immediate pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Dual load/store: P bit, plus W for writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #off post-indexed form — always implies writeback.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10039
10040 /* Table of Thumb instructions which exist in both 16- and 32-bit
10041 encodings (the latter only in post-V6T2 cores). The index is the
10042 value used in the insns table below. When there is more than one
10043 possible 16-bit encoding for the instruction, this table always
10044 holds variant (1).
10045 Also contains several pseudo-instructions used during relaxation. */
/* Each X() entry is: mnemonic suffix, 16-bit opcode, 32-bit opcode.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the T_MNEM_* enumerators.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode table, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode table, indexed the same way.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10145
10146 /* Thumb instruction encoders, in alphabetical order. */
10147
10148 /* ADDW or SUBW. */
10149
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
     is the SP-{plus,minus}-immediate form of the instruction.  */
  if (Rn == REG_SP)
    constraint (Rd == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rd);

  /* Rn -> bits 16-19, Rd -> bits 8-11; the 12-bit immediate is filled
     in later by the BFD_RELOC_ARM_T32_IMM12 fixup.  */
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
10168
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  Selects between
   the 16-bit and 32-bit Thumb encodings (with relaxation where the size
   was not forced) and applies the architectural constraints on SP/PC
   operands.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form "Rd, foo" is treated as "Rd, Rd, foo".  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* A PC destination must be the last instruction in an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* The flag-setting mnemonics can use a 16-bit encoding outside an
	 IT block; the non-flag-setting ones only inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  opcode stays 0 when no 16-bit form fits.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* Unless a 16-bit size was demanded, allow relaxation
		     to the 32-bit form later.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Emit the 32-bit encoding.  */
	      if (Rd == REG_PC)
		{
		  /* The only legal form with a PC destination is the
		     exception-return idiom SUBS PC, LR, #const.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  /* Switch the opcode to its modified-immediate form:
		     clear bits 28-25, set bit 28.  */
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD Rd, Rd, Rn and ADD Rd, Rn, Rd are equivalent;
			 normalize so the non-destination source is Rn.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb-1) syntax: only 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  /* The high-register ADD has only two operand fields; the
	     destination must coincide with one of the sources.  */
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10379
/* Encode Thumb ADR (load a PC-relative address into a register),
   choosing between the 16-bit form, the 32-bit ADR.W form, and
   deferred section relaxation.  */

static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  Start from the 16-bit encoding;
	 inst.relax lets the relaxation pass widen it if the target
	 turns out to be out of range.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
10414
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form "Rd, foo" is treated as "Rd, Rd, foo".  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch the opcode to its modified-immediate form:
	     clear bits 28-25, set bit 28.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow form sets the flags, so it is only usable where
	     that matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form has only two register fields, so the
	     destination must equal the first source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10503
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form "Rd, foo" is treated as "Rd, Rd, foo".  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch the opcode to its modified-immediate form:
	     clear bits 28-25, set bit 28.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow form sets the flags, so it is only usable where
	     that matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: either source may coincide with the
		 destination in the two-field 16-bit encoding.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10604
10605 static void
10606 do_t_bfc (void)
10607 {
10608 unsigned Rd;
10609 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10610 constraint (msb > 32, _("bit-field extends past end of register"));
10611 /* The instruction encoding stores the LSB and MSB,
10612 not the LSB and width. */
10613 Rd = inst.operands[0].reg;
10614 reject_bad_reg (Rd);
10615 inst.instruction |= Rd << 8;
10616 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10617 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10618 inst.instruction |= msb - 1;
10619 }
10620
10621 static void
10622 do_t_bfi (void)
10623 {
10624 int Rd, Rn;
10625 unsigned int msb;
10626
10627 Rd = inst.operands[0].reg;
10628 reject_bad_reg (Rd);
10629
10630 /* #0 in second position is alternative syntax for bfc, which is
10631 the same instruction but with REG_PC in the Rm field. */
10632 if (!inst.operands[1].isreg)
10633 Rn = REG_PC;
10634 else
10635 {
10636 Rn = inst.operands[1].reg;
10637 reject_bad_reg (Rn);
10638 }
10639
10640 msb = inst.operands[2].imm + inst.operands[3].imm;
10641 constraint (msb > 32, _("bit-field extends past end of register"));
10642 /* The instruction encoding stores the LSB and MSB,
10643 not the LSB and width. */
10644 inst.instruction |= Rd << 8;
10645 inst.instruction |= Rn << 16;
10646 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10647 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10648 inst.instruction |= msb - 1;
10649 }
10650
10651 static void
10652 do_t_bfx (void)
10653 {
10654 unsigned Rd, Rn;
10655
10656 Rd = inst.operands[0].reg;
10657 Rn = inst.operands[1].reg;
10658
10659 reject_bad_reg (Rd);
10660 reject_bad_reg (Rn);
10661
10662 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10663 _("bit-field extends past end of register"));
10664 inst.instruction |= Rd << 8;
10665 inst.instruction |= Rn << 16;
10666 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10667 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10668 inst.instruction |= inst.operands[3].imm - 1;
10669 }
10670
10671 /* ARM V5 Thumb BLX (argument parse)
10672 BLX <target_addr> which is BLX(1)
10673 BLX <Rm> which is BLX(2)
10674 Unfortunately, there are two different opcodes for this mnemonic.
10675 So, the insns[].value is not used, and the code here zaps values
10676 into inst.instruction.
10677
10678 ??? How to take advantage of the additional two bits of displacement
10679 available in Thumb32 mode? Need new relocation? */
10680
10681 static void
10682 do_t_blx (void)
10683 {
10684 set_it_insn_type_last ();
10685
10686 if (inst.operands[0].isreg)
10687 {
10688 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10689 /* We have a register, so this is BLX(2). */
10690 inst.instruction |= inst.operands[0].reg << 3;
10691 }
10692 else
10693 {
10694 /* No register. This must be BLX(1). */
10695 inst.instruction = 0xf000e800;
10696 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10697 }
10698 }
10699
10700 static void
10701 do_t_branch (void)
10702 {
10703 int opcode;
10704 int cond;
10705 int reloc;
10706
10707 cond = inst.cond;
10708 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10709
10710 if (in_it_block ())
10711 {
10712 /* Conditional branches inside IT blocks are encoded as unconditional
10713 branches. */
10714 cond = COND_ALWAYS;
10715 }
10716 else
10717 cond = inst.cond;
10718
10719 if (cond != COND_ALWAYS)
10720 opcode = T_MNEM_bcond;
10721 else
10722 opcode = inst.instruction;
10723
10724 if (unified_syntax
10725 && (inst.size_req == 4
10726 || (inst.size_req != 2
10727 && (inst.operands[0].hasreloc
10728 || inst.reloc.exp.X_op == O_constant))))
10729 {
10730 inst.instruction = THUMB_OP32(opcode);
10731 if (cond == COND_ALWAYS)
10732 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10733 else
10734 {
10735 gas_assert (cond != 0xF);
10736 inst.instruction |= cond << 22;
10737 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10738 }
10739 }
10740 else
10741 {
10742 inst.instruction = THUMB_OP16(opcode);
10743 if (cond == COND_ALWAYS)
10744 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10745 else
10746 {
10747 inst.instruction |= cond << 8;
10748 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10749 }
10750 /* Allow section relaxation. */
10751 if (unified_syntax && inst.size_req != 2)
10752 inst.relax = opcode;
10753 }
10754 inst.reloc.type = reloc;
10755 inst.reloc.pc_rel = 1;
10756 }
10757
10758 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10759 between the two is the maximum immediate allowed - which is passed in
10760 RANGE. */
10761 static void
10762 do_t_bkpt_hlt1 (int range)
10763 {
10764 constraint (inst.cond != COND_ALWAYS,
10765 _("instruction is always unconditional"));
10766 if (inst.operands[0].present)
10767 {
10768 constraint (inst.operands[0].imm > range,
10769 _("immediate value out of range"));
10770 inst.instruction |= inst.operands[0].imm;
10771 }
10772
10773 set_it_insn_type (NEUTRAL_IT_INSN);
10774 }
10775
/* Thumb HLT: like BKPT but with a 6-bit immediate (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
10781
/* Thumb BKPT: 8-bit immediate (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
10787
/* Encode Thumb BL/BLX with the 23-bit branch relocation, with
   special handling for PLT relocs and (COFF only) interworking
   entry points.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
10815
/* Encode Thumb BX Rm.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
10825
10826 static void
10827 do_t_bxj (void)
10828 {
10829 int Rm;
10830
10831 set_it_insn_type_last ();
10832 Rm = inst.operands[0].reg;
10833 reject_bad_reg (Rm);
10834 inst.instruction |= Rm << 16;
10835 }
10836
10837 static void
10838 do_t_clz (void)
10839 {
10840 unsigned Rd;
10841 unsigned Rm;
10842
10843 Rd = inst.operands[0].reg;
10844 Rm = inst.operands[1].reg;
10845
10846 reject_bad_reg (Rd);
10847 reject_bad_reg (Rm);
10848
10849 inst.instruction |= Rd << 8;
10850 inst.instruction |= Rm << 16;
10851 inst.instruction |= Rm;
10852 }
10853
/* Encode Thumb CPS (change processor state); not permitted in an
   IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
10860
/* Encode Thumb CPSIE/CPSID, choosing between the 16-bit form and the
   32-bit form (needed for the optional mode argument).  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: rebuild from scratch, carrying over the
	 interrupt-enable/disable (imod) bits from the 16-bit opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
10888
10889 /* THUMB CPY instruction (argument parse). */
10890
10891 static void
10892 do_t_cpy (void)
10893 {
10894 if (inst.size_req == 4)
10895 {
10896 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10897 inst.instruction |= inst.operands[0].reg << 8;
10898 inst.instruction |= inst.operands[1].reg;
10899 }
10900 else
10901 {
10902 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10903 inst.instruction |= (inst.operands[0].reg & 0x7);
10904 inst.instruction |= inst.operands[1].reg << 3;
10905 }
10906 }
10907
10908 static void
10909 do_t_cbz (void)
10910 {
10911 set_it_insn_type (OUTSIDE_IT_INSN);
10912 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10913 inst.instruction |= inst.operands[0].reg;
10914 inst.reloc.pc_rel = 1;
10915 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10916 }
10917
/* Encode Thumb-2 DBG #imm.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
10923
10924 static void
10925 do_t_div (void)
10926 {
10927 unsigned Rd, Rn, Rm;
10928
10929 Rd = inst.operands[0].reg;
10930 Rn = (inst.operands[1].present
10931 ? inst.operands[1].reg : Rd);
10932 Rm = inst.operands[2].reg;
10933
10934 reject_bad_reg (Rd);
10935 reject_bad_reg (Rn);
10936 reject_bad_reg (Rm);
10937
10938 inst.instruction |= Rd << 8;
10939 inst.instruction |= Rn << 16;
10940 inst.instruction |= Rm;
10941 }
10942
10943 static void
10944 do_t_hint (void)
10945 {
10946 if (unified_syntax && inst.size_req == 4)
10947 inst.instruction = THUMB_OP32 (inst.instruction);
10948 else
10949 inst.instruction = THUMB_OP16 (inst.instruction);
10950 }
10951
/* Encode the IT instruction and record the new IT block state in
   now_it.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Low four opcode bits are the then/else mask; OR in a guard bit
     above them for the internal representation.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.
     The position of the lowest set bit in the mask marks the block
     length; the bits above it encode then(0)/else(1) relative to the
     first condition and must be flipped when the base condition's
     parity flips.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      /* Write the adjusted mask back into the opcode.  */
      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
10994
/* Helper function used for both push/pop and ldm/stm.  BASE is the
   base register number, MASK the register-list bitmask, WRITEBACK
   whether the base is written back.  Validates the register list and
   finishes encoding inst.instruction; single-register lists are
   rewritten as a plain ldr/str.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; must be last in any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* A power-of-two mask means exactly one register is transferred.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single register number into the Rt field (bits
	 15-12) reusing MASK for the final OR below.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11058
/* Encode Thumb LDM/STM, preferring a 16-bit encoding (including the
   push/pop and single-register ldr/str aliases) when the operands
   permit, otherwise falling back to the 32-bit form via
   encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  Requires an IA
	 addressing mode, no forced 32-bit size, and only low
	 registers in the list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP base: use push/pop, or SP-relative str/ldr for a
		 single register without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* Fall back to the 32-bit encoding.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit ldmia/stmia forms.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11186
11187 static void
11188 do_t_ldrex (void)
11189 {
11190 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11191 || inst.operands[1].postind || inst.operands[1].writeback
11192 || inst.operands[1].immisreg || inst.operands[1].shifted
11193 || inst.operands[1].negative,
11194 BAD_ADDR_MODE);
11195
11196 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11197
11198 inst.instruction |= inst.operands[0].reg << 12;
11199 inst.instruction |= inst.operands[1].reg << 16;
11200 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11201 }
11202
11203 static void
11204 do_t_ldrexd (void)
11205 {
11206 if (!inst.operands[1].present)
11207 {
11208 constraint (inst.operands[0].reg == REG_LR,
11209 _("r14 not allowed as first register "
11210 "when second register is omitted"));
11211 inst.operands[1].reg = inst.operands[0].reg + 1;
11212 }
11213 constraint (inst.operands[0].reg == inst.operands[1].reg,
11214 BAD_OVERLAP);
11215
11216 inst.instruction |= inst.operands[0].reg << 12;
11217 inst.instruction |= inst.operands[1].reg << 8;
11218 inst.instruction |= inst.operands[2].reg << 16;
11219 }
11220
/* Encode a Thumb load/store of a single data item (LDR/STR and the
   byte, halfword and signed-extending variants).  Operand 0 is the
   transfer register, operand 1 the address operand (or a bare
   expression to be satisfied from the literal pool).  In unified
   syntax this selects between the 16-bit and 32-bit encodings,
   deferring to relaxation when the size is not forced; in divided
   syntax only the classic 16-bit forms are accepted.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC (and is not pre-indexed) must be the last
     instruction in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Bare expression: hand it to move_or_literal_pool, which
	     returns non-zero once it has fully encoded the insn
	     (literal-pool load or equivalent move).  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative forms use dedicated pseudo
		     opcodes so relaxation knows the offset range.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Size not forced: record the 16-bit opcode so
		   relaxation can widen to 32 bits if needed.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-UAL) syntax: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* SP/PC-relative 16-bit forms: word only, load only for PC.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode into the matching
     register-offset one; ldrsb/ldrsh are already register-offset.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11407
11408 static void
11409 do_t_ldstd (void)
11410 {
11411 if (!inst.operands[1].present)
11412 {
11413 inst.operands[1].reg = inst.operands[0].reg + 1;
11414 constraint (inst.operands[0].reg == REG_LR,
11415 _("r14 not allowed here"));
11416 constraint (inst.operands[0].reg == REG_R12,
11417 _("r12 not allowed here"));
11418 }
11419
11420 if (inst.operands[2].writeback
11421 && (inst.operands[0].reg == inst.operands[2].reg
11422 || inst.operands[1].reg == inst.operands[2].reg))
11423 as_warn (_("base register written back, and overlaps "
11424 "one of transfer registers"));
11425
11426 inst.instruction |= inst.operands[0].reg << 12;
11427 inst.instruction |= inst.operands[1].reg << 8;
11428 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11429 }
11430
11431 static void
11432 do_t_ldstt (void)
11433 {
11434 inst.instruction |= inst.operands[0].reg << 12;
11435 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11436 }
11437
11438 static void
11439 do_t_mla (void)
11440 {
11441 unsigned Rd, Rn, Rm, Ra;
11442
11443 Rd = inst.operands[0].reg;
11444 Rn = inst.operands[1].reg;
11445 Rm = inst.operands[2].reg;
11446 Ra = inst.operands[3].reg;
11447
11448 reject_bad_reg (Rd);
11449 reject_bad_reg (Rn);
11450 reject_bad_reg (Rm);
11451 reject_bad_reg (Ra);
11452
11453 inst.instruction |= Rd << 8;
11454 inst.instruction |= Rn << 16;
11455 inst.instruction |= Rm;
11456 inst.instruction |= Ra << 12;
11457 }
11458
11459 static void
11460 do_t_mlal (void)
11461 {
11462 unsigned RdLo, RdHi, Rn, Rm;
11463
11464 RdLo = inst.operands[0].reg;
11465 RdHi = inst.operands[1].reg;
11466 Rn = inst.operands[2].reg;
11467 Rm = inst.operands[3].reg;
11468
11469 reject_bad_reg (RdLo);
11470 reject_bad_reg (RdHi);
11471 reject_bad_reg (Rn);
11472 reject_bad_reg (Rm);
11473
11474 inst.instruction |= RdLo << 12;
11475 inst.instruction |= RdHi << 8;
11476 inst.instruction |= Rn << 16;
11477 inst.instruction |= Rm;
11478 }
11479
/* Encode Thumb MOV, MOVS, CMP (register or immediate second operand).
   Operand 0 is Rd (or Rn for CMP), operand 1 the source.  In unified
   syntax this chooses between 16- and 32-bit encodings, folds
   register-shifted MOVs into the separate shift instructions, and
   applies the architectural restrictions on SP/PC; in divided syntax
   only the classic 16-bit encodings are produced.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A MOV into PC must be the last instruction of an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Shift of the destination-register field in the 32-bit
	 encoding: bits 8-11 for MOV/MOVS, bits 16-19 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Tentative choice of a 16-bit encoding; refined below.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_warn (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* Size not forced: let relaxation widen if needed.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit modified-immediate encoding.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift-by-register form has Rd == Rn.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		/* No 16-bit immediate ROR; fall back to 32-bit.  */
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results. Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11768
11769 static void
11770 do_t_mov16 (void)
11771 {
11772 unsigned Rd;
11773 bfd_vma imm;
11774 bfd_boolean top;
11775
11776 top = (inst.instruction & 0x00800000) != 0;
11777 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11778 {
11779 constraint (top, _(":lower16: not allowed this instruction"));
11780 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11781 }
11782 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11783 {
11784 constraint (!top, _(":upper16: not allowed this instruction"));
11785 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11786 }
11787
11788 Rd = inst.operands[0].reg;
11789 reject_bad_reg (Rd);
11790
11791 inst.instruction |= Rd << 8;
11792 if (inst.reloc.type == BFD_RELOC_UNUSED)
11793 {
11794 imm = inst.reloc.exp.X_add_number;
11795 inst.instruction |= (imm & 0xf000) << 4;
11796 inst.instruction |= (imm & 0x0800) << 15;
11797 inst.instruction |= (imm & 0x0700) << 4;
11798 inst.instruction |= (imm & 0x00ff);
11799 }
11800 }
11801
/* Encode Thumb MVN/MVNS, TST, CMN (shared handler).  Operand 0 is
   Rd (or Rn for TST/CMN), operand 1 a register (possibly shifted) or
   an immediate.  Chooses between 16- and 32-bit encodings in unified
   syntax; divided syntax only allows the 16-bit register forms.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN allow SP as the first operand; MVN/TST do not.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Destination-field shift in the 32-bit encoding: bits 8-11
	 for MVN/MVNS, bits 16-19 for the compare/test forms.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: unshifted low registers, no flag variants.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
11881
/* Encode Thumb MRS: read a status or banked register into Rd.
   Operand 1 is either a banked register (parsed as a register with
   packed encoding bits) or a PSR-name immediate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Give the VFP "mrs" alias (e.g. FPSCR transfer) first refusal.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form.  NOTE(review): the parser appears to
	 pack the SYSm/R encoding bits into the register value (0x200
	 marker, fields at 0xf0000, 0x300 and SPSR_BIT) — confirm
	 against the banked-register parsing code.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
11928
/* Encode Thumb MSR: write a core register into a status or banked
   register.  Operand 0 names the destination PSR (or banked
   register), operand 1 is the source register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Give the VFP "msr" alias (e.g. FPSCR transfer) first refusal.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* The explicit cast works around a build failure (spurious warning
     promoted to error) with GCC 4.3.3.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the packed flag bits into their encoding fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
11974
11975 static void
11976 do_t_mul (void)
11977 {
11978 bfd_boolean narrow;
11979 unsigned Rd, Rn, Rm;
11980
11981 if (!inst.operands[2].present)
11982 inst.operands[2].reg = inst.operands[0].reg;
11983
11984 Rd = inst.operands[0].reg;
11985 Rn = inst.operands[1].reg;
11986 Rm = inst.operands[2].reg;
11987
11988 if (unified_syntax)
11989 {
11990 if (inst.size_req == 4
11991 || (Rd != Rn
11992 && Rd != Rm)
11993 || Rn > 7
11994 || Rm > 7)
11995 narrow = FALSE;
11996 else if (inst.instruction == T_MNEM_muls)
11997 narrow = !in_it_block ();
11998 else
11999 narrow = in_it_block ();
12000 }
12001 else
12002 {
12003 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12004 constraint (Rn > 7 || Rm > 7,
12005 BAD_HIREG);
12006 narrow = TRUE;
12007 }
12008
12009 if (narrow)
12010 {
12011 /* 16-bit MULS/Conditional MUL. */
12012 inst.instruction = THUMB_OP16 (inst.instruction);
12013 inst.instruction |= Rd;
12014
12015 if (Rd == Rn)
12016 inst.instruction |= Rm << 3;
12017 else if (Rd == Rm)
12018 inst.instruction |= Rn << 3;
12019 else
12020 constraint (1, _("dest must overlap one source register"));
12021 }
12022 else
12023 {
12024 constraint (inst.instruction != T_MNEM_mul,
12025 _("Thumb-2 MUL must not set flags"));
12026 /* 32-bit MUL. */
12027 inst.instruction = THUMB_OP32 (inst.instruction);
12028 inst.instruction |= Rd << 8;
12029 inst.instruction |= Rn << 16;
12030 inst.instruction |= Rm << 0;
12031
12032 reject_bad_reg (Rd);
12033 reject_bad_reg (Rn);
12034 reject_bad_reg (Rm);
12035 }
12036 }
12037
12038 static void
12039 do_t_mull (void)
12040 {
12041 unsigned RdLo, RdHi, Rn, Rm;
12042
12043 RdLo = inst.operands[0].reg;
12044 RdHi = inst.operands[1].reg;
12045 Rn = inst.operands[2].reg;
12046 Rm = inst.operands[3].reg;
12047
12048 reject_bad_reg (RdLo);
12049 reject_bad_reg (RdHi);
12050 reject_bad_reg (Rn);
12051 reject_bad_reg (Rm);
12052
12053 inst.instruction |= RdLo << 12;
12054 inst.instruction |= RdHi << 8;
12055 inst.instruction |= Rn << 16;
12056 inst.instruction |= Rm;
12057
12058 if (RdLo == RdHi)
12059 as_tsktsk (_("rdhi and rdlo must be different"));
12060 }
12061
12062 static void
12063 do_t_nop (void)
12064 {
12065 set_it_insn_type (NEUTRAL_IT_INSN);
12066
12067 if (unified_syntax)
12068 {
12069 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12070 {
12071 inst.instruction = THUMB_OP32 (inst.instruction);
12072 inst.instruction |= inst.operands[0].imm;
12073 }
12074 else
12075 {
12076 /* PR9722: Check for Thumb2 availability before
12077 generating a thumb2 nop instruction. */
12078 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12079 {
12080 inst.instruction = THUMB_OP16 (inst.instruction);
12081 inst.instruction |= inst.operands[0].imm << 4;
12082 }
12083 else
12084 inst.instruction = 0x46c0;
12085 }
12086 }
12087 else
12088 {
12089 constraint (inst.operands[0].present,
12090 _("Thumb does not support NOP with hints"));
12091 inst.instruction = 0x46c0;
12092 }
12093 }
12094
12095 static void
12096 do_t_neg (void)
12097 {
12098 if (unified_syntax)
12099 {
12100 bfd_boolean narrow;
12101
12102 if (THUMB_SETS_FLAGS (inst.instruction))
12103 narrow = !in_it_block ();
12104 else
12105 narrow = in_it_block ();
12106 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12107 narrow = FALSE;
12108 if (inst.size_req == 4)
12109 narrow = FALSE;
12110
12111 if (!narrow)
12112 {
12113 inst.instruction = THUMB_OP32 (inst.instruction);
12114 inst.instruction |= inst.operands[0].reg << 8;
12115 inst.instruction |= inst.operands[1].reg << 16;
12116 }
12117 else
12118 {
12119 inst.instruction = THUMB_OP16 (inst.instruction);
12120 inst.instruction |= inst.operands[0].reg;
12121 inst.instruction |= inst.operands[1].reg << 3;
12122 }
12123 }
12124 else
12125 {
12126 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12127 BAD_HIREG);
12128 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12129
12130 inst.instruction = THUMB_OP16 (inst.instruction);
12131 inst.instruction |= inst.operands[0].reg;
12132 inst.instruction |= inst.operands[1].reg << 3;
12133 }
12134 }
12135
12136 static void
12137 do_t_orn (void)
12138 {
12139 unsigned Rd, Rn;
12140
12141 Rd = inst.operands[0].reg;
12142 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12143
12144 reject_bad_reg (Rd);
12145 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12146 reject_bad_reg (Rn);
12147
12148 inst.instruction |= Rd << 8;
12149 inst.instruction |= Rn << 16;
12150
12151 if (!inst.operands[2].isreg)
12152 {
12153 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12154 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12155 }
12156 else
12157 {
12158 unsigned Rm;
12159
12160 Rm = inst.operands[2].reg;
12161 reject_bad_reg (Rm);
12162
12163 constraint (inst.operands[2].shifted
12164 && inst.operands[2].immisreg,
12165 _("shift must be constant"));
12166 encode_thumb32_shifted_operand (2);
12167 }
12168 }
12169
12170 static void
12171 do_t_pkhbt (void)
12172 {
12173 unsigned Rd, Rn, Rm;
12174
12175 Rd = inst.operands[0].reg;
12176 Rn = inst.operands[1].reg;
12177 Rm = inst.operands[2].reg;
12178
12179 reject_bad_reg (Rd);
12180 reject_bad_reg (Rn);
12181 reject_bad_reg (Rm);
12182
12183 inst.instruction |= Rd << 8;
12184 inst.instruction |= Rn << 16;
12185 inst.instruction |= Rm;
12186 if (inst.operands[3].present)
12187 {
12188 unsigned int val = inst.reloc.exp.X_add_number;
12189 constraint (inst.reloc.exp.X_op != O_constant,
12190 _("expression too complex"));
12191 inst.instruction |= (val & 0x1c) << 10;
12192 inst.instruction |= (val & 0x03) << 6;
12193 }
12194 }
12195
12196 static void
12197 do_t_pkhtb (void)
12198 {
12199 if (!inst.operands[3].present)
12200 {
12201 unsigned Rtmp;
12202
12203 inst.instruction &= ~0x00000020;
12204
12205 /* PR 10168. Swap the Rm and Rn registers. */
12206 Rtmp = inst.operands[1].reg;
12207 inst.operands[1].reg = inst.operands[2].reg;
12208 inst.operands[2].reg = Rtmp;
12209 }
12210 do_t_pkhbt ();
12211 }
12212
12213 static void
12214 do_t_pld (void)
12215 {
12216 if (inst.operands[0].immisreg)
12217 reject_bad_reg (inst.operands[0].imm);
12218
12219 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12220 }
12221
12222 static void
12223 do_t_push_pop (void)
12224 {
12225 unsigned mask;
12226
12227 constraint (inst.operands[0].writeback,
12228 _("push/pop do not support {reglist}^"));
12229 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12230 _("expression too complex"));
12231
12232 mask = inst.operands[0].imm;
12233 if (inst.size_req != 4 && (mask & ~0xff) == 0)
12234 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12235 else if (inst.size_req != 4
12236 && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
12237 ? REG_LR : REG_PC)))
12238 {
12239 inst.instruction = THUMB_OP16 (inst.instruction);
12240 inst.instruction |= THUMB_PP_PC_LR;
12241 inst.instruction |= mask & 0xff;
12242 }
12243 else if (unified_syntax)
12244 {
12245 inst.instruction = THUMB_OP32 (inst.instruction);
12246 encode_thumb2_ldmstm (13, mask, TRUE);
12247 }
12248 else
12249 {
12250 inst.error = _("invalid register list to push/pop instruction");
12251 return;
12252 }
12253 }
12254
12255 static void
12256 do_t_rbit (void)
12257 {
12258 unsigned Rd, Rm;
12259
12260 Rd = inst.operands[0].reg;
12261 Rm = inst.operands[1].reg;
12262
12263 reject_bad_reg (Rd);
12264 reject_bad_reg (Rm);
12265
12266 inst.instruction |= Rd << 8;
12267 inst.instruction |= Rm << 16;
12268 inst.instruction |= Rm;
12269 }
12270
12271 static void
12272 do_t_rev (void)
12273 {
12274 unsigned Rd, Rm;
12275
12276 Rd = inst.operands[0].reg;
12277 Rm = inst.operands[1].reg;
12278
12279 reject_bad_reg (Rd);
12280 reject_bad_reg (Rm);
12281
12282 if (Rd <= 7 && Rm <= 7
12283 && inst.size_req != 4)
12284 {
12285 inst.instruction = THUMB_OP16 (inst.instruction);
12286 inst.instruction |= Rd;
12287 inst.instruction |= Rm << 3;
12288 }
12289 else if (unified_syntax)
12290 {
12291 inst.instruction = THUMB_OP32 (inst.instruction);
12292 inst.instruction |= Rd << 8;
12293 inst.instruction |= Rm << 16;
12294 inst.instruction |= Rm;
12295 }
12296 else
12297 inst.error = BAD_HIREG;
12298 }
12299
12300 static void
12301 do_t_rrx (void)
12302 {
12303 unsigned Rd, Rm;
12304
12305 Rd = inst.operands[0].reg;
12306 Rm = inst.operands[1].reg;
12307
12308 reject_bad_reg (Rd);
12309 reject_bad_reg (Rm);
12310
12311 inst.instruction |= Rd << 8;
12312 inst.instruction |= Rm;
12313 }
12314
/* Encode Thumb RSB/RSBS (reverse subtract).  With only two operands
   the first source defaults to Rd.  "rsb Rd, Rs, #0" may shrink to
   the 16-bit NEG encoding when registers and IT state permit.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit: the flag-setting form is
	 narrow outside an IT block, the plain form inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only a literal zero immediate has a 16-bit form.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12369
12370 static void
12371 do_t_setend (void)
12372 {
12373 if (warn_on_deprecated
12374 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12375 as_warn (_("setend use is deprecated for ARMv8"));
12376
12377 set_it_insn_type (OUTSIDE_IT_INSN);
12378 if (inst.operands[0].imm)
12379 inst.instruction |= 0x8;
12380 }
12381
/* Encode a Thumb shift instruction (ASR, LSL, LSR, ROR, with and
   without flag setting).  Selects between the 16-bit and 32-bit
   encodings depending on register numbers, IT-block state, operand
   form and any explicit size request, for both unified and divided
   (pre-UAL) syntax.  */

static void
do_t_shift (void)
{
  /* The middle operand is optional: "op Rd, Rm" means "op Rd, Rd, Rm".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the narrow (16-bit) encoding can be used: the
	 16-bit forms set the flags outside an IT block and leave them
	 alone inside one, require low registers, have no immediate ROR
	 form, and (for the register-shift form) require Rd == Rn.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* Wide register-shift form.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Wide immediate-shift form: encoded as MOV{S} Rd, Rm,
		 shift #imm.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* Narrow register-shift form (Rd == Rn checked above).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Narrow immediate-shift form; the shift amount is
		 resolved by a BFD_RELOC_ARM_THUMB_SHIFT fixup.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Divided (pre-UAL) syntax: only the 16-bit encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12529
12530 static void
12531 do_t_simd (void)
12532 {
12533 unsigned Rd, Rn, Rm;
12534
12535 Rd = inst.operands[0].reg;
12536 Rn = inst.operands[1].reg;
12537 Rm = inst.operands[2].reg;
12538
12539 reject_bad_reg (Rd);
12540 reject_bad_reg (Rn);
12541 reject_bad_reg (Rm);
12542
12543 inst.instruction |= Rd << 8;
12544 inst.instruction |= Rn << 16;
12545 inst.instruction |= Rm;
12546 }
12547
12548 static void
12549 do_t_simd2 (void)
12550 {
12551 unsigned Rd, Rn, Rm;
12552
12553 Rd = inst.operands[0].reg;
12554 Rm = inst.operands[1].reg;
12555 Rn = inst.operands[2].reg;
12556
12557 reject_bad_reg (Rd);
12558 reject_bad_reg (Rn);
12559 reject_bad_reg (Rm);
12560
12561 inst.instruction |= Rd << 8;
12562 inst.instruction |= Rn << 16;
12563 inst.instruction |= Rm;
12564 }
12565
12566 static void
12567 do_t_smc (void)
12568 {
12569 unsigned int value = inst.reloc.exp.X_add_number;
12570 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12571 _("SMC is not permitted on this architecture"));
12572 constraint (inst.reloc.exp.X_op != O_constant,
12573 _("expression too complex"));
12574 inst.reloc.type = BFD_RELOC_UNUSED;
12575 inst.instruction |= (value & 0xf000) >> 12;
12576 inst.instruction |= (value & 0x0ff0);
12577 inst.instruction |= (value & 0x000f) << 16;
12578 /* PR gas/15623: SMC instructions must be last in an IT block. */
12579 set_it_insn_type_last ();
12580 }
12581
12582 static void
12583 do_t_hvc (void)
12584 {
12585 unsigned int value = inst.reloc.exp.X_add_number;
12586
12587 inst.reloc.type = BFD_RELOC_UNUSED;
12588 inst.instruction |= (value & 0x0fff);
12589 inst.instruction |= (value & 0xf000) << 4;
12590 }
12591
/* Shared encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation-position operand before it is encoded: the SSAT
   wrapper passes 1 and the USAT wrapper passes 0 (see do_t_ssat and
   do_t_usat).  Operand 3, if present, is an optional LSL/ASR shift
   applied to Rn.  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      /* The shift amount is encoded in the instruction itself, so no
	 fixup is needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* An arithmetic shift is selected by the sh bit.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* The shift amount is split: imm3 into bits <14:12>, imm2
	     into bits <7:6>.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12629
/* Encode Thumb-2 SSAT: the saturation position is encoded biased by
   one, everything else is shared with USAT.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12635
12636 static void
12637 do_t_ssat16 (void)
12638 {
12639 unsigned Rd, Rn;
12640
12641 Rd = inst.operands[0].reg;
12642 Rn = inst.operands[2].reg;
12643
12644 reject_bad_reg (Rd);
12645 reject_bad_reg (Rn);
12646
12647 inst.instruction |= Rd << 8;
12648 inst.instruction |= inst.operands[1].imm - 1;
12649 inst.instruction |= Rn << 16;
12650 }
12651
12652 static void
12653 do_t_strex (void)
12654 {
12655 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12656 || inst.operands[2].postind || inst.operands[2].writeback
12657 || inst.operands[2].immisreg || inst.operands[2].shifted
12658 || inst.operands[2].negative,
12659 BAD_ADDR_MODE);
12660
12661 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12662
12663 inst.instruction |= inst.operands[0].reg << 8;
12664 inst.instruction |= inst.operands[1].reg << 12;
12665 inst.instruction |= inst.operands[2].reg << 16;
12666 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12667 }
12668
12669 static void
12670 do_t_strexd (void)
12671 {
12672 if (!inst.operands[2].present)
12673 inst.operands[2].reg = inst.operands[1].reg + 1;
12674
12675 constraint (inst.operands[0].reg == inst.operands[1].reg
12676 || inst.operands[0].reg == inst.operands[2].reg
12677 || inst.operands[0].reg == inst.operands[3].reg,
12678 BAD_OVERLAP);
12679
12680 inst.instruction |= inst.operands[0].reg;
12681 inst.instruction |= inst.operands[1].reg << 12;
12682 inst.instruction |= inst.operands[2].reg << 8;
12683 inst.instruction |= inst.operands[3].reg << 16;
12684 }
12685
12686 static void
12687 do_t_sxtah (void)
12688 {
12689 unsigned Rd, Rn, Rm;
12690
12691 Rd = inst.operands[0].reg;
12692 Rn = inst.operands[1].reg;
12693 Rm = inst.operands[2].reg;
12694
12695 reject_bad_reg (Rd);
12696 reject_bad_reg (Rn);
12697 reject_bad_reg (Rm);
12698
12699 inst.instruction |= Rd << 8;
12700 inst.instruction |= Rn << 16;
12701 inst.instruction |= Rm;
12702 inst.instruction |= inst.operands[3].imm << 4;
12703 }
12704
/* Encode a Thumb sign/zero-extend style instruction (SXTH and
   friends), with an optional rotation in operand 2.  The narrow
   16-bit encoding is used when both registers are low, no rotation is
   requested and no explicit 32-bit size was asked for.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* Narrow (16-bit) encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* Wide (32-bit) encoding; the rotation field goes in bits
	 starting at bit 4.  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-unified syntax only has the 16-bit form.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12740
/* Encode Thumb SVC (SWI).  The immediate is left to a
   BFD_RELOC_ARM_SWI fixup.  */

static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12757
12758 static void
12759 do_t_tb (void)
12760 {
12761 unsigned Rn, Rm;
12762 int half;
12763
12764 half = (inst.instruction & 0x10) != 0;
12765 set_it_insn_type_last ();
12766 constraint (inst.operands[0].immisreg,
12767 _("instruction requires register index"));
12768
12769 Rn = inst.operands[0].reg;
12770 Rm = inst.operands[0].imm;
12771
12772 constraint (Rn == REG_SP, BAD_SP);
12773 reject_bad_reg (Rm);
12774
12775 constraint (!half && inst.operands[0].shifted,
12776 _("instruction does not allow shifted index"));
12777 inst.instruction |= (Rn << 16) | Rm;
12778 }
12779
12780 static void
12781 do_t_udf (void)
12782 {
12783 if (!inst.operands[0].present)
12784 inst.operands[0].imm = 0;
12785
12786 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12787 {
12788 constraint (inst.size_req == 2,
12789 _("immediate value out of range"));
12790 inst.instruction = THUMB_OP32 (inst.instruction);
12791 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12792 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12793 }
12794 else
12795 {
12796 inst.instruction = THUMB_OP16 (inst.instruction);
12797 inst.instruction |= inst.operands[0].imm;
12798 }
12799
12800 set_it_insn_type (NEUTRAL_IT_INSN);
12801 }
12802
12803
/* Encode Thumb-2 USAT: the saturation position is encoded directly
   (no bias), everything else is shared with SSAT.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12809
12810 static void
12811 do_t_usat16 (void)
12812 {
12813 unsigned Rd, Rn;
12814
12815 Rd = inst.operands[0].reg;
12816 Rn = inst.operands[2].reg;
12817
12818 reject_bad_reg (Rd);
12819 reject_bad_reg (Rn);
12820
12821 inst.instruction |= Rd << 8;
12822 inst.instruction |= inst.operands[1].imm;
12823 inst.instruction |= Rn << 16;
12824 }
12825
12826 /* Neon instruction encoder helpers. */
12827
12828 /* Encodings for the different types for various Neon opcodes. */
12829
12830 /* An "invalid" code for the following tables. */
12831 #define N_INV -1u
12832
/* One row of the Neon opcode table: alternative base encodings for an
   overloaded mnemonic, selected according to operand/element type.  */
struct neon_tab_entry
{
  unsigned integer;		/* Encoding for integer element types.  */
  unsigned float_or_poly;	/* Encoding for float or polynomial types.  */
  unsigned scalar_or_imm;	/* Encoding for scalar or immediate variants.  */
};
12839
12840 /* Map overloaded Neon opcodes to their respective encodings. */
12841 #define NEON_ENC_TAB \
12842 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12843 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12844 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12845 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12846 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12847 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12848 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12849 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12850 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12851 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12852 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12853 /* Register variants of the following two instructions are encoded as
12854 vcge / vcgt with the operands reversed. */ \
12855 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12856 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12857 X(vfma, N_INV, 0x0000c10, N_INV), \
12858 X(vfms, N_INV, 0x0200c10, N_INV), \
12859 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12860 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12861 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12862 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12863 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12864 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12865 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12866 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12867 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12868 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12869 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12870 X(vshl, 0x0000400, N_INV, 0x0800510), \
12871 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12872 X(vand, 0x0000110, N_INV, 0x0800030), \
12873 X(vbic, 0x0100110, N_INV, 0x0800030), \
12874 X(veor, 0x1000110, N_INV, N_INV), \
12875 X(vorn, 0x0300110, N_INV, 0x0800010), \
12876 X(vorr, 0x0200110, N_INV, 0x0800010), \
12877 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12878 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12879 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12880 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12881 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12882 X(vst1, 0x0000000, 0x0800000, N_INV), \
12883 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12884 X(vst2, 0x0000100, 0x0800100, N_INV), \
12885 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12886 X(vst3, 0x0000200, 0x0800200, N_INV), \
12887 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12888 X(vst4, 0x0000300, 0x0800300, N_INV), \
12889 X(vmovn, 0x1b20200, N_INV, N_INV), \
12890 X(vtrn, 0x1b20080, N_INV, N_INV), \
12891 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12892 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12893 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12894 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12895 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12896 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12897 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12898 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12899 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12900 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12901 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
12902 X(vseleq, 0xe000a00, N_INV, N_INV), \
12903 X(vselvs, 0xe100a00, N_INV, N_INV), \
12904 X(vselge, 0xe200a00, N_INV, N_INV), \
12905 X(vselgt, 0xe300a00, N_INV, N_INV), \
12906 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
12907 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
12908 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
12909 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
12910 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
12911 X(aes, 0x3b00300, N_INV, N_INV), \
12912 X(sha3op, 0x2000c00, N_INV, N_INV), \
12913 X(sha1h, 0x3b902c0, N_INV, N_INV), \
12914 X(sha2op, 0x3ba0380, N_INV, N_INV)
12915
12916 enum neon_opc
12917 {
12918 #define X(OPC,I,F,S) N_MNEM_##OPC
12919 NEON_ENC_TAB
12920 #undef X
12921 };
12922
12923 static const struct neon_tab_entry neon_enc_tab[] =
12924 {
12925 #define X(OPC,I,F,S) { (I), (F), (S) }
12926 NEON_ENC_TAB
12927 #undef X
12928 };
12929
12930 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12931 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12932 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12933 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12934 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12935 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12936 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12937 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12938 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12939 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12940 #define NEON_ENC_SINGLE_(X) \
12941 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12942 #define NEON_ENC_DOUBLE_(X) \
12943 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12944 #define NEON_ENC_FPV8_(X) \
12945 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12946
12947 #define NEON_ENCODE(type, inst) \
12948 do \
12949 { \
12950 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12951 inst.is_neon = 1; \
12952 } \
12953 while (0)
12954
12955 #define check_neon_suffixes \
12956 do \
12957 { \
12958 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12959 { \
12960 as_bad (_("invalid neon suffix for non neon instruction")); \
12961 return; \
12962 } \
12963 } \
12964 while (0)
12965
12966 /* Define shapes for instruction operands. The following mnemonic characters
12967 are used in this table:
12968
12969 F - VFP S<n> register
12970 D - Neon D<n> register
12971 Q - Neon Q<n> register
12972 I - Immediate
12973 S - Scalar
12974 R - ARM register
12975 L - D<n> register list
12976
12977 This table is used to generate various data:
12978 - enumerations of the form NS_DDR to be used as arguments to
12979 neon_select_shape.
12980 - a table classifying shapes into single, double, quad, mixed.
12981 - a table used to drive neon_select_shape. */
12982
12983 #define NEON_SHAPE_DEF \
12984 X(3, (D, D, D), DOUBLE), \
12985 X(3, (Q, Q, Q), QUAD), \
12986 X(3, (D, D, I), DOUBLE), \
12987 X(3, (Q, Q, I), QUAD), \
12988 X(3, (D, D, S), DOUBLE), \
12989 X(3, (Q, Q, S), QUAD), \
12990 X(2, (D, D), DOUBLE), \
12991 X(2, (Q, Q), QUAD), \
12992 X(2, (D, S), DOUBLE), \
12993 X(2, (Q, S), QUAD), \
12994 X(2, (D, R), DOUBLE), \
12995 X(2, (Q, R), QUAD), \
12996 X(2, (D, I), DOUBLE), \
12997 X(2, (Q, I), QUAD), \
12998 X(3, (D, L, D), DOUBLE), \
12999 X(2, (D, Q), MIXED), \
13000 X(2, (Q, D), MIXED), \
13001 X(3, (D, Q, I), MIXED), \
13002 X(3, (Q, D, I), MIXED), \
13003 X(3, (Q, D, D), MIXED), \
13004 X(3, (D, Q, Q), MIXED), \
13005 X(3, (Q, Q, D), MIXED), \
13006 X(3, (Q, D, S), MIXED), \
13007 X(3, (D, Q, S), MIXED), \
13008 X(4, (D, D, D, I), DOUBLE), \
13009 X(4, (Q, Q, Q, I), QUAD), \
13010 X(2, (F, F), SINGLE), \
13011 X(3, (F, F, F), SINGLE), \
13012 X(2, (F, I), SINGLE), \
13013 X(2, (F, D), MIXED), \
13014 X(2, (D, F), MIXED), \
13015 X(3, (F, F, I), MIXED), \
13016 X(4, (R, R, F, F), SINGLE), \
13017 X(4, (F, F, R, R), SINGLE), \
13018 X(3, (D, R, R), DOUBLE), \
13019 X(3, (R, R, D), DOUBLE), \
13020 X(2, (S, R), SINGLE), \
13021 X(2, (R, S), SINGLE), \
13022 X(2, (F, R), SINGLE), \
13023 X(2, (R, F), SINGLE)
13024
13025 #define S2(A,B) NS_##A##B
13026 #define S3(A,B,C) NS_##A##B##C
13027 #define S4(A,B,C,D) NS_##A##B##C##D
13028
13029 #define X(N, L, C) S##N L
13030
13031 enum neon_shape
13032 {
13033 NEON_SHAPE_DEF,
13034 NS_NULL
13035 };
13036
13037 #undef X
13038 #undef S2
13039 #undef S3
13040 #undef S4
13041
13042 enum neon_shape_class
13043 {
13044 SC_SINGLE,
13045 SC_DOUBLE,
13046 SC_QUAD,
13047 SC_MIXED
13048 };
13049
13050 #define X(N, L, C) SC_##C
13051
13052 static enum neon_shape_class neon_shape_class[] =
13053 {
13054 NEON_SHAPE_DEF
13055 };
13056
13057 #undef X
13058
13059 enum neon_shape_el
13060 {
13061 SE_F,
13062 SE_D,
13063 SE_Q,
13064 SE_I,
13065 SE_S,
13066 SE_R,
13067 SE_L
13068 };
13069
/* Register widths (in bits) of the shape elements above, indexed by
   enum neon_shape_el.  Zero means no inherent width.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F: VFP S<n> register.  */
  64,	/* SE_D: Neon D<n> register.  */
  128,	/* SE_Q: Neon Q<n> register.  */
  0,	/* SE_I: immediate.  */
  32,	/* SE_S: scalar.  */
  32,	/* SE_R: ARM core register.  */
  0	/* SE_L: D<n> register list.  */
};
13081
/* Description of one instruction shape: how many operands it has and
   the kind of each one.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];  /* Kind of each operand.  */
};
13087
13088 #define S2(A,B) { SE_##A, SE_##B }
13089 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13090 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13091
13092 #define X(N, L, C) { N, S##N L }
13093
13094 static struct neon_shape_info neon_shape_tab[] =
13095 {
13096 NEON_SHAPE_DEF
13097 };
13098
13099 #undef X
13100 #undef S2
13101 #undef S3
13102 #undef S4
13103
13104 /* Bit masks used in type checking given instructions.
13105 'N_EQK' means the type must be the same as (or based on in some way) the key
13106 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13107 set, various other bits can be set as well in order to modify the meaning of
13108 the type constraint. */
13109
13110 enum neon_type_mask
13111 {
13112 N_S8 = 0x0000001,
13113 N_S16 = 0x0000002,
13114 N_S32 = 0x0000004,
13115 N_S64 = 0x0000008,
13116 N_U8 = 0x0000010,
13117 N_U16 = 0x0000020,
13118 N_U32 = 0x0000040,
13119 N_U64 = 0x0000080,
13120 N_I8 = 0x0000100,
13121 N_I16 = 0x0000200,
13122 N_I32 = 0x0000400,
13123 N_I64 = 0x0000800,
13124 N_8 = 0x0001000,
13125 N_16 = 0x0002000,
13126 N_32 = 0x0004000,
13127 N_64 = 0x0008000,
13128 N_P8 = 0x0010000,
13129 N_P16 = 0x0020000,
13130 N_F16 = 0x0040000,
13131 N_F32 = 0x0080000,
13132 N_F64 = 0x0100000,
13133 N_P64 = 0x0200000,
13134 N_KEY = 0x1000000, /* Key element (main type specifier). */
13135 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
13136 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
13137 N_UNT = 0x8000000, /* Must be explicitly untyped. */
13138 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
13139 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
13140 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13141 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13142 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13143 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
13144 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13145 N_UTYP = 0,
13146 N_MAX_NONSPECIAL = N_P64
13147 };
13148
13149 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13150
13151 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13152 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13153 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13154 #define N_SUF_32 (N_SU_32 | N_F32)
13155 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13156 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13157
13158 /* Pass this as the first type argument to neon_check_type to ignore types
13159 altogether. */
13160 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13161
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives.  Return NS_NULL if the current
   instruction doesn't fit.  For non-polymorphic shapes, checking is usually
   done as a function of operand parsing, so this function doesn't need to be
   called.  Shapes should be listed in order of decreasing length.  The
   argument list is terminated by NS_NULL.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches every parsed
     operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check operand J against the element kind the shape requires.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13259
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  Classification comes from the
   neon_shape_class table generated from NEON_SHAPE_DEF.  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}
13268
13269 static void
13270 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13271 unsigned *g_size)
13272 {
13273 /* Allow modification to be made to types which are constrained to be
13274 based on the key element, based on bits set alongside N_EQK. */
13275 if ((typebits & N_EQK) != 0)
13276 {
13277 if ((typebits & N_HLF) != 0)
13278 *g_size /= 2;
13279 else if ((typebits & N_DBL) != 0)
13280 *g_size *= 2;
13281 if ((typebits & N_SGN) != 0)
13282 *g_type = NT_signed;
13283 else if ((typebits & N_UNS) != 0)
13284 *g_type = NT_unsigned;
13285 else if ((typebits & N_INT) != 0)
13286 *g_type = NT_integer;
13287 else if ((typebits & N_FLT) != 0)
13288 *g_type = NT_float;
13289 else if ((typebits & N_SIZ) != 0)
13290 *g_type = NT_untyped;
13291 }
13292 }
13293
13294 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13295 operand type, i.e. the single type specified in a Neon instruction when it
13296 is the only one given. */
13297
13298 static struct neon_type_el
13299 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13300 {
13301 struct neon_type_el dest = *key;
13302
13303 gas_assert ((thisarg & N_EQK) != 0);
13304
13305 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13306
13307 return dest;
13308 }
13309
/* Convert Neon type and size into compact bitmask representation
   (one of the single-bit values from enum neon_type_mask).  Returns
   N_UTYP (zero) for combinations with no corresponding mask bit.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  return N_UTYP;
}
13386
/* Convert compact Neon bitmask type representation to a type and size.  Only
   handles the case where a single bit is set in the mask: returns FAIL
   for N_EQK masks and for masks with no size or type bit set, SUCCESS
   otherwise with *TYPE and *SIZE filled in.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK overloads the low mask bits as modifiers, so a mask carrying
     it cannot be decoded as a concrete type.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* First derive the element size...  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* ...then the element type.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13425
13426 /* Modify a bitmask of allowed types. This is only needed for type
13427 relaxation. */
13428
13429 static unsigned
13430 modify_types_allowed (unsigned allowed, unsigned mods)
13431 {
13432 unsigned size;
13433 enum neon_el_type type;
13434 unsigned destmask;
13435 int i;
13436
13437 destmask = 0;
13438
13439 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13440 {
13441 if (el_type_of_type_chk (&type, &size,
13442 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13443 {
13444 neon_modify_type_size (mods, &type, &size);
13445 destmask |= type_chk_of_el_type (type, size);
13446 }
13447 }
13448
13449 return destmask;
13450 }
13451
13452 /* Check type and return type classification.
13453 The manual states (paraphrase): If one datatype is given, it indicates the
13454 type given in:
13455 - the second operand, if there is one
13456 - the operand, if there is no second operand
13457 - the result, if there are no operands.
13458 This isn't quite good enough though, so we use a concept of a "key" datatype
13459 which is set on a per-instruction basis, which is the one which matters when
13460 only one data type is written.
13461 Note: this function has side-effects (e.g. filling in missing operands). All
13462 Neon instructions should call it before performing bit encoding. */
13463
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  One N_xxx mask per operand; N_IGNORE_TYPE
     anywhere means "skip type checking entirely".  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the "key" (reference) type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Writing a type both after the mnemonic and on an operand is an error.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      /* One mnemonic type, several operands: propagate the single type
	 through the key element to all the others.  */
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's (decayed) type and allowed mask;
     pass 1 validates every operand against that key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* N_EQK operands are checked against the (possibly modified) key
	     type instead of their own mask, but only on pass 1 when the key
	     is known.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable. */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly. */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand. */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK: apply this operand's modifiers to the key type and
		     require an exact match.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13640
13641 /* Neon-style VFP instruction forwarding. */
13642
13643 /* Thumb VFP instructions have 0xE in the condition field. */
13644
13645 static void
13646 do_vfp_cond_or_thumb (void)
13647 {
13648 inst.is_neon = 1;
13649
13650 if (thumb_mode)
13651 inst.instruction |= 0xe0000000;
13652 else
13653 inst.instruction |= inst.cond << 28;
13654 }
13655
13656 /* Look up and encode a simple mnemonic, for use as a helper function for the
13657 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13658 etc. It is assumed that operand parsing has already been done, and that the
13659 operands are in the form expected by the given opcode (this isn't necessarily
13660 the same as the form in which they were parsed, hence some massaging must
13661 take place before this function is called).
13662 Checks current arch version against that in the looked-up opcode. */
13663
13664 static void
13665 do_vfp_nsyn_opcode (const char *opname)
13666 {
13667 const struct asm_opcode *opcode;
13668
13669 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13670
13671 if (!opcode)
13672 abort ();
13673
13674 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13675 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13676 _(BAD_FPU));
13677
13678 inst.is_neon = 1;
13679
13680 if (thumb_mode)
13681 {
13682 inst.instruction = opcode->tvalue;
13683 opcode->tencode ();
13684 }
13685 else
13686 {
13687 inst.instruction = (inst.cond << 28) | opcode->avalue;
13688 opcode->aencode ();
13689 }
13690 }
13691
13692 static void
13693 do_vfp_nsyn_add_sub (enum neon_shape rs)
13694 {
13695 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13696
13697 if (rs == NS_FFF)
13698 {
13699 if (is_add)
13700 do_vfp_nsyn_opcode ("fadds");
13701 else
13702 do_vfp_nsyn_opcode ("fsubs");
13703 }
13704 else
13705 {
13706 if (is_add)
13707 do_vfp_nsyn_opcode ("faddd");
13708 else
13709 do_vfp_nsyn_opcode ("fsubd");
13710 }
13711 }
13712
13713 /* Check operand types to see if this is a VFP instruction, and if so call
13714 PFN (). */
13715
13716 static int
13717 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13718 {
13719 enum neon_shape rs;
13720 struct neon_type_el et;
13721
13722 switch (args)
13723 {
13724 case 2:
13725 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13726 et = neon_check_type (2, rs,
13727 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13728 break;
13729
13730 case 3:
13731 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13732 et = neon_check_type (3, rs,
13733 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13734 break;
13735
13736 default:
13737 abort ();
13738 }
13739
13740 if (et.type != NT_invtype)
13741 {
13742 pfn (rs);
13743 return SUCCESS;
13744 }
13745
13746 inst.error = NULL;
13747 return FAIL;
13748 }
13749
13750 static void
13751 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13752 {
13753 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13754
13755 if (rs == NS_FFF)
13756 {
13757 if (is_mla)
13758 do_vfp_nsyn_opcode ("fmacs");
13759 else
13760 do_vfp_nsyn_opcode ("fnmacs");
13761 }
13762 else
13763 {
13764 if (is_mla)
13765 do_vfp_nsyn_opcode ("fmacd");
13766 else
13767 do_vfp_nsyn_opcode ("fnmacd");
13768 }
13769 }
13770
13771 static void
13772 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13773 {
13774 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13775
13776 if (rs == NS_FFF)
13777 {
13778 if (is_fma)
13779 do_vfp_nsyn_opcode ("ffmas");
13780 else
13781 do_vfp_nsyn_opcode ("ffnmas");
13782 }
13783 else
13784 {
13785 if (is_fma)
13786 do_vfp_nsyn_opcode ("ffmad");
13787 else
13788 do_vfp_nsyn_opcode ("ffnmad");
13789 }
13790 }
13791
13792 static void
13793 do_vfp_nsyn_mul (enum neon_shape rs)
13794 {
13795 if (rs == NS_FFF)
13796 do_vfp_nsyn_opcode ("fmuls");
13797 else
13798 do_vfp_nsyn_opcode ("fmuld");
13799 }
13800
13801 static void
13802 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13803 {
13804 int is_neg = (inst.instruction & 0x80) != 0;
13805 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13806
13807 if (rs == NS_FF)
13808 {
13809 if (is_neg)
13810 do_vfp_nsyn_opcode ("fnegs");
13811 else
13812 do_vfp_nsyn_opcode ("fabss");
13813 }
13814 else
13815 {
13816 if (is_neg)
13817 do_vfp_nsyn_opcode ("fnegd");
13818 else
13819 do_vfp_nsyn_opcode ("fabsd");
13820 }
13821 }
13822
13823 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13824 insns belong to Neon, and are handled elsewhere. */
13825
13826 static void
13827 do_vfp_nsyn_ldm_stm (int is_dbmode)
13828 {
13829 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13830 if (is_ldm)
13831 {
13832 if (is_dbmode)
13833 do_vfp_nsyn_opcode ("fldmdbs");
13834 else
13835 do_vfp_nsyn_opcode ("fldmias");
13836 }
13837 else
13838 {
13839 if (is_dbmode)
13840 do_vfp_nsyn_opcode ("fstmdbs");
13841 else
13842 do_vfp_nsyn_opcode ("fstmias");
13843 }
13844 }
13845
13846 static void
13847 do_vfp_nsyn_sqrt (void)
13848 {
13849 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13850 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13851
13852 if (rs == NS_FF)
13853 do_vfp_nsyn_opcode ("fsqrts");
13854 else
13855 do_vfp_nsyn_opcode ("fsqrtd");
13856 }
13857
13858 static void
13859 do_vfp_nsyn_div (void)
13860 {
13861 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13862 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13863 N_F32 | N_F64 | N_KEY | N_VFP);
13864
13865 if (rs == NS_FFF)
13866 do_vfp_nsyn_opcode ("fdivs");
13867 else
13868 do_vfp_nsyn_opcode ("fdivd");
13869 }
13870
13871 static void
13872 do_vfp_nsyn_nmul (void)
13873 {
13874 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13875 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13876 N_F32 | N_F64 | N_KEY | N_VFP);
13877
13878 if (rs == NS_FFF)
13879 {
13880 NEON_ENCODE (SINGLE, inst);
13881 do_vfp_sp_dyadic ();
13882 }
13883 else
13884 {
13885 NEON_ENCODE (DOUBLE, inst);
13886 do_vfp_dp_rd_rn_rm ();
13887 }
13888 do_vfp_cond_or_thumb ();
13889 }
13890
13891 static void
13892 do_vfp_nsyn_cmp (void)
13893 {
13894 if (inst.operands[1].isreg)
13895 {
13896 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13897 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13898
13899 if (rs == NS_FF)
13900 {
13901 NEON_ENCODE (SINGLE, inst);
13902 do_vfp_sp_monadic ();
13903 }
13904 else
13905 {
13906 NEON_ENCODE (DOUBLE, inst);
13907 do_vfp_dp_rd_rm ();
13908 }
13909 }
13910 else
13911 {
13912 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13913 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13914
13915 switch (inst.instruction & 0x0fffffff)
13916 {
13917 case N_MNEM_vcmp:
13918 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13919 break;
13920 case N_MNEM_vcmpe:
13921 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13922 break;
13923 default:
13924 abort ();
13925 }
13926
13927 if (rs == NS_FI)
13928 {
13929 NEON_ENCODE (SINGLE, inst);
13930 do_vfp_sp_compare_z ();
13931 }
13932 else
13933 {
13934 NEON_ENCODE (DOUBLE, inst);
13935 do_vfp_dp_rd ();
13936 }
13937 }
13938 do_vfp_cond_or_thumb ();
13939 }
13940
13941 static void
13942 nsyn_insert_sp (void)
13943 {
13944 inst.operands[1] = inst.operands[0];
13945 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13946 inst.operands[0].reg = REG_SP;
13947 inst.operands[0].isreg = 1;
13948 inst.operands[0].writeback = 1;
13949 inst.operands[0].present = 1;
13950 }
13951
13952 static void
13953 do_vfp_nsyn_push (void)
13954 {
13955 nsyn_insert_sp ();
13956 if (inst.operands[1].issingle)
13957 do_vfp_nsyn_opcode ("fstmdbs");
13958 else
13959 do_vfp_nsyn_opcode ("fstmdbd");
13960 }
13961
13962 static void
13963 do_vfp_nsyn_pop (void)
13964 {
13965 nsyn_insert_sp ();
13966 if (inst.operands[1].issingle)
13967 do_vfp_nsyn_opcode ("fldmias");
13968 else
13969 do_vfp_nsyn_opcode ("fldmiad");
13970 }
13971
13972 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13973 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13974
13975 static void
13976 neon_dp_fixup (struct arm_it* insn)
13977 {
13978 unsigned int i = insn->instruction;
13979 insn->is_neon = 1;
13980
13981 if (thumb_mode)
13982 {
13983 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13984 if (i & (1 << 24))
13985 i |= 1 << 28;
13986
13987 i &= ~(1 << 24);
13988
13989 i |= 0xef000000;
13990 }
13991 else
13992 i |= 0xf2000000;
13993
13994 insn->instruction = i;
13995 }
13996
13997 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13998 (0, 1, 2, 3). */
13999
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit, so a size of
     8/16/32/64 yields 4/5/6/7; subtracting 4 maps that to 0/1/2/3.  */
  int lowest_set = ffs (x);

  return lowest_set - 4;
}
14005
14006 #define LOW4(R) ((R) & 0xf)
14007 #define HI1(R) (((R) >> 4) & 1)
14008
14009 /* Encode insns with bit pattern:
14010
14011 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14012 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14013
14014 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14015 different meaning for some instruction. */
14016
14017 static void
14018 neon_three_same (int isquad, int ubit, int size)
14019 {
14020 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14021 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14022 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14023 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14024 inst.instruction |= LOW4 (inst.operands[2].reg);
14025 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14026 inst.instruction |= (isquad != 0) << 6;
14027 inst.instruction |= (ubit != 0) << 24;
14028 if (size != -1)
14029 inst.instruction |= neon_logbits (size) << 20;
14030
14031 neon_dp_fixup (&inst);
14032 }
14033
14034 /* Encode instructions of the form:
14035
14036 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14037 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14038
14039 Don't write size if SIZE == -1. */
14040
14041 static void
14042 neon_two_same (int qbit, int ubit, int size)
14043 {
14044 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14045 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14046 inst.instruction |= LOW4 (inst.operands[1].reg);
14047 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14048 inst.instruction |= (qbit != 0) << 6;
14049 inst.instruction |= (ubit != 0) << 24;
14050
14051 if (size != -1)
14052 inst.instruction |= neon_logbits (size) << 18;
14053
14054 neon_dp_fixup (&inst);
14055 }
14056
14057 /* Neon instruction encoders, in approximate order of appearance. */
14058
14059 static void
14060 do_neon_dyadic_i_su (void)
14061 {
14062 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14063 struct neon_type_el et = neon_check_type (3, rs,
14064 N_EQK, N_EQK, N_SU_32 | N_KEY);
14065 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14066 }
14067
14068 static void
14069 do_neon_dyadic_i64_su (void)
14070 {
14071 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14072 struct neon_type_el et = neon_check_type (3, rs,
14073 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14074 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14075 }
14076
/* Encode a Neon immediate-shift instruction.  Operand 0 is the destination
   register, operand 1 the source.  ISQUAD selects the Q bit, IMMBITS is the
   pre-computed immediate field, and the U bit (bit 24) is written as UVAL
   only when WRITE_UBIT is set.  ET supplies the element size: its byte
   count lands in bit 7 (the 64-bit flag) and bits 19-21.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes (1/2/4/8).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;		/* 1 only for 64-bit elements.  */
  inst.instruction |= (size & 0x7) << 19;	/* Low size bits.  */
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14095
14096 static void
14097 do_neon_shl_imm (void)
14098 {
14099 if (!inst.operands[2].isreg)
14100 {
14101 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14102 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14103 NEON_ENCODE (IMMED, inst);
14104 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
14105 }
14106 else
14107 {
14108 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14109 struct neon_type_el et = neon_check_type (3, rs,
14110 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14111 unsigned int tmp;
14112
14113 /* VSHL/VQSHL 3-register variants have syntax such as:
14114 vshl.xx Dd, Dm, Dn
14115 whereas other 3-register operations encoded by neon_three_same have
14116 syntax like:
14117 vadd.xx Dd, Dn, Dm
14118 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14119 here. */
14120 tmp = inst.operands[2].reg;
14121 inst.operands[2].reg = inst.operands[1].reg;
14122 inst.operands[1].reg = tmp;
14123 NEON_ENCODE (INTEGER, inst);
14124 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14125 }
14126 }
14127
14128 static void
14129 do_neon_qshl_imm (void)
14130 {
14131 if (!inst.operands[2].isreg)
14132 {
14133 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14134 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14135
14136 NEON_ENCODE (IMMED, inst);
14137 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14138 inst.operands[2].imm);
14139 }
14140 else
14141 {
14142 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14143 struct neon_type_el et = neon_check_type (3, rs,
14144 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14145 unsigned int tmp;
14146
14147 /* See note in do_neon_shl_imm. */
14148 tmp = inst.operands[2].reg;
14149 inst.operands[2].reg = inst.operands[1].reg;
14150 inst.operands[1].reg = tmp;
14151 NEON_ENCODE (INTEGER, inst);
14152 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14153 }
14154 }
14155
14156 static void
14157 do_neon_rshl (void)
14158 {
14159 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14160 struct neon_type_el et = neon_check_type (3, rs,
14161 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14162 unsigned int tmp;
14163
14164 tmp = inst.operands[2].reg;
14165 inst.operands[2].reg = inst.operands[1].reg;
14166 inst.operands[1].reg = tmp;
14167 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14168 }
14169
/* Convert IMMEDIATE into the cmode/immbits encoding used by the Neon
   bitwise-immediate instructions, for elements of SIZE bits.  On success
   the 8-bit payload is stored in *IMMBITS and the cmode value is returned;
   otherwise an error is reported and FAIL is returned.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit forms: the payload byte may sit in any of the four byte
	 positions (cmode 0x1/0x3/0x5/0x7).  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* No 32-bit form matched.  If the value is a repeating 16-bit
	 pattern, reduce it and fall through to the 16-bit encodings.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit forms: payload byte in either half (cmode 0x9/0xb).  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14225
/* Encode the Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN and
   friends), in both register and immediate forms.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: the immediate is operand 2 (three-operand syntax)
	 or operand 1 (two-operand syntax).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* NB: cast applied to inst.instruction first to keep old compilers
	 (e.g. GCC 4.3.3) happy about the enum arithmetic.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14311
14312 static void
14313 do_neon_bitfield (void)
14314 {
14315 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14316 neon_check_type (3, rs, N_IGNORE_TYPE);
14317 neon_three_same (neon_quad (rs), 0, -1);
14318 }
14319
14320 static void
14321 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14322 unsigned destbits)
14323 {
14324 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14325 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14326 types | N_KEY);
14327 if (et.type == NT_float)
14328 {
14329 NEON_ENCODE (FLOAT, inst);
14330 neon_three_same (neon_quad (rs), 0, -1);
14331 }
14332 else
14333 {
14334 NEON_ENCODE (INTEGER, inst);
14335 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14336 }
14337 }
14338
14339 static void
14340 do_neon_dyadic_if_su (void)
14341 {
14342 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14343 }
14344
14345 static void
14346 do_neon_dyadic_if_su_d (void)
14347 {
14348 /* This version only allow D registers, but that constraint is enforced during
14349 operand parsing so we don't need to do anything extra here. */
14350 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14351 }
14352
14353 static void
14354 do_neon_dyadic_if_i_d (void)
14355 {
14356 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14357 affected if we specify unsigned args. */
14358 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14359 }
14360
/* Flags for vfp_or_neon_is_neon, selecting which checks/fix-ups to apply.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject conditional execution (ARM mode only).  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 FPU extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14367
14368 /* Call this function if an instruction which may have belonged to the VFP or
14369 Neon instruction sets, but turned out to be a Neon instruction (due to the
14370 operand types involved, etc.). We have to check and/or fix-up a couple of
14371 things:
14372
14373 - Make sure the user hasn't attempted to make a Neon instruction
14374 conditional.
14375 - Alter the value in the condition code field if necessary.
14376 - Make sure that the arch supports Neon instructions.
14377
14378 Which of these operations take place depends on bits from enum
14379 vfp_or_neon_is_neon_bits.
14380
14381 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14382 current instruction's condition is COND_ALWAYS, the condition field is
14383 changed to inst.uncond_value. This is necessary because instructions shared
14384 between VFP and Neon may be conditional for the VFP variants only, and the
14385 unconditional Neon version must have, e.g., 0xF in the condition field. */
14386
14387 static int
14388 vfp_or_neon_is_neon (unsigned check)
14389 {
14390 /* Conditions are always legal in Thumb mode (IT blocks). */
14391 if (!thumb_mode && (check & NEON_CHECK_CC))
14392 {
14393 if (inst.cond != COND_ALWAYS)
14394 {
14395 first_error (_(BAD_COND));
14396 return FAIL;
14397 }
14398 if (inst.uncond_value != -1)
14399 inst.instruction |= inst.uncond_value << 28;
14400 }
14401
14402 if ((check & NEON_CHECK_ARCH)
14403 && !mark_feature_used (&fpu_neon_ext_v1))
14404 {
14405 first_error (_(BAD_FPU));
14406 return FAIL;
14407 }
14408
14409 if ((check & NEON_CHECK_ARCH8)
14410 && !mark_feature_used (&fpu_neon_ext_armv8))
14411 {
14412 first_error (_(BAD_FPU));
14413 return FAIL;
14414 }
14415
14416 return SUCCESS;
14417 }
14418
14419 static void
14420 do_neon_addsub_if_i (void)
14421 {
14422 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14423 return;
14424
14425 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14426 return;
14427
14428 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14429 affected if we specify unsigned args. */
14430 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14431 }
14432
14433 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14434 result to be:
14435 V<op> A,B (A is operand 0, B is operand 2)
14436 to mean:
14437 V<op> A,B,A
14438 not:
14439 V<op> A,B,B
14440 so handle that case specially. */
14441
14442 static void
14443 neon_exchange_operands (void)
14444 {
14445 void *scratch = alloca (sizeof (inst.operands[0]));
14446 if (inst.operands[1].present)
14447 {
14448 /* Swap operands[1] and operands[2]. */
14449 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14450 inst.operands[1] = inst.operands[2];
14451 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14452 }
14453 else
14454 {
14455 inst.operands[1] = inst.operands[2];
14456 inst.operands[2] = inst.operands[0];
14457 }
14458 }
14459
14460 static void
14461 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14462 {
14463 if (inst.operands[2].isreg)
14464 {
14465 if (invert)
14466 neon_exchange_operands ();
14467 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14468 }
14469 else
14470 {
14471 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14472 struct neon_type_el et = neon_check_type (2, rs,
14473 N_EQK | N_SIZ, immtypes | N_KEY);
14474
14475 NEON_ENCODE (IMMED, inst);
14476 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14477 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14478 inst.instruction |= LOW4 (inst.operands[1].reg);
14479 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14480 inst.instruction |= neon_quad (rs) << 6;
14481 inst.instruction |= (et.type == NT_float) << 10;
14482 inst.instruction |= neon_logbits (et.size) << 18;
14483
14484 neon_dp_fixup (&inst);
14485 }
14486 }
14487
14488 static void
14489 do_neon_cmp (void)
14490 {
14491 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14492 }
14493
14494 static void
14495 do_neon_cmp_inv (void)
14496 {
14497 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14498 }
14499
14500 static void
14501 do_neon_ceq (void)
14502 {
14503 neon_compare (N_IF_32, N_IF_32, FALSE);
14504 }
14505
14506 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14507 scalars, which are encoded in 5 bits, M : Rm.
14508 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14509 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14510 index in M. */
14511
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14537
14538 /* Encode multiply / multiply-accumulate scalar instructions. */
14539
14540 static void
14541 neon_mul_mac (struct neon_type_el et, int ubit)
14542 {
14543 unsigned scalar;
14544
14545 /* Give a more helpful error message if we have an invalid type. */
14546 if (et.type == NT_invtype)
14547 return;
14548
14549 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14550 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14551 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14552 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14553 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14554 inst.instruction |= LOW4 (scalar);
14555 inst.instruction |= HI1 (scalar) << 5;
14556 inst.instruction |= (et.type == NT_float) << 8;
14557 inst.instruction |= neon_logbits (et.size) << 20;
14558 inst.instruction |= (ubit != 0) << 24;
14559
14560 neon_dp_fixup (&inst);
14561 }
14562
14563 static void
14564 do_neon_mac_maybe_scalar (void)
14565 {
14566 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14567 return;
14568
14569 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14570 return;
14571
14572 if (inst.operands[2].isscalar)
14573 {
14574 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14575 struct neon_type_el et = neon_check_type (3, rs,
14576 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14577 NEON_ENCODE (SCALAR, inst);
14578 neon_mul_mac (et, neon_quad (rs));
14579 }
14580 else
14581 {
14582 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14583 affected if we specify unsigned args. */
14584 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14585 }
14586 }
14587
14588 static void
14589 do_neon_fmac (void)
14590 {
14591 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14592 return;
14593
14594 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14595 return;
14596
14597 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14598 }
14599
/* Assemble VTST (three same-size registers, 8/16/32-bit elements).  */
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
14608
14609 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14610 same types as the MAC equivalents. The polynomial type for this instruction
14611 is encoded the same as the integer type. */
14612
14613 static void
14614 do_neon_mul (void)
14615 {
14616 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14617 return;
14618
14619 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14620 return;
14621
14622 if (inst.operands[2].isscalar)
14623 do_neon_mac_maybe_scalar ();
14624 else
14625 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14626 }
14627
/* Assemble VQDMULH/VQRDMULH in either the vector-by-scalar or the
   three-register form.  Element types are signed 16/32-bit only.  */
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14649
/* Assemble VACGE/VACGT (absolute compare, F32 only).  */
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
14658
/* Assemble VACLE/VACLT: the inverted comparisons are encoded as
   VACGE/VACGT with the two source operands swapped.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14665
/* Assemble VRECPS/VRSQRTS (Newton-Raphson step instructions, F32 only).  */
static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from the bit mask (hence -1 here).  */
  neon_three_same (neon_quad (rs), 0, -1);
}
14673
/* Assemble VABS/VNEG: try the VFP form first, otherwise encode the
   two-register Neon form (signed integer or F32 element types).  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);        /* Vm.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
  inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
  inst.instruction |= (et.type == NT_float) << 10;        /* Float variant.  */
  inst.instruction |= neon_logbits (et.size) << 18;       /* Element size.  */

  neon_dp_fixup (&inst);
}
14699
/* Assemble VSLI (shift left and insert).  The immediate must be
   strictly less than the element size.  */
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14711
/* Assemble VSRI (shift right and insert).  The immediate range is
   1..size, and the shift amount is encoded as size - imm.  */
static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
14723
/* Assemble VQSHLU (saturating shift left, unsigned result from signed
   input).  N_UNS on the first type makes the destination unsigned while
   the source stays signed.  */
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14740
/* Assemble VQMOVN (saturating narrowing move, D <- Q).  */
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Size is encoded from the narrowed (destination) element width.  */
  neon_two_same (0, 1, et.size / 2);
}
14755
/* Assemble VQMOVUN (saturating narrowing move with unsigned result).  */
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
14765
/* Assemble VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   immediate is rewritten as VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as VQMOVN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Shift amount is encoded as size - imm; U bit follows signedness.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
14792
/* Assemble VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed input).  A zero immediate becomes VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as VQMOVUN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
14822
/* Assemble VMOVN (narrowing move, D <- Q).  */
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  /* Size is encoded from the narrowed (destination) element width.  */
  neon_two_same (0, 1, et.size / 2);
}
14831
/* Assemble VSHRN/VRSHRN (shift right and narrow).  A zero immediate is
   rewritten as VMOVN.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as VMOVN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
14856
/* Assemble VSHLL (shift left long, Q <- D).  The maximum-shift form
   (imm == element size) has its own encoding; smaller shifts use the
   generic immediate-shift encoding.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
      inst.instruction |= LOW4 (inst.operands[1].reg);        /* Vm.  */
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
      inst.instruction |= neon_logbits (et.size) << 18;       /* Size.  */

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
14886
14887 /* Check the various types for the VCVT instruction, and return which version
14888 the current instruction is. */
14889
/* X-macro table of all VCVT conversion flavours.  Each CVT_VAR row names a
   flavour (dst_src), gives the destination and source type bits, a
   register-class/shape modifier, and up to three VFP opcode-name strings:
   the bitshift form (BSN), the plain form (CN) and the round-to-zero form
   (ZN) -- NULL where no such VFP mnemonic exists.  The table is expanded
   several times below with different CVT_VAR definitions.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion: turn each table row into an enumerator
   neon_cvt_flavour_<name>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 onwards are handled by VFP rather than Neon.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
14927
/* Determine which conversion flavour the current instruction is, by
   type-checking each CVT_FLAVOUR_VAR row in turn against the operands
   until one matches.  Returns neon_cvt_flavour_invalid if none does.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Expansion of one table row: try the row's type pair; on a match,
     clear any error left by earlier failed checks and return.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* Single-precision shapes use whole-register (N_VFP) matching.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
14953
/* Rounding/conversion modes for the VCVT family: a/n/p/m are the
   directed-rounding variants (VCVTA/VCVTN/VCVTP/VCVTM), z is
   round-to-zero (plain VCVT), x is VCVTR, r is round-to-nearest.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
14964
14965 /* Neon-syntax VFP conversions. */
14966
/* Assemble a Neon-syntax VCVT as a VFP instruction, by looking up the
   VFP mnemonic for FLAVOUR in the appropriate column of CVT_FLAVOUR_VAR
   and re-dispatching through do_vfp_nsyn_opcode.  Does nothing if no
   VFP mnemonic exists for this flavour (NULL table entry).  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift forms are two-operand at the VFP level: fold
	     operand 2 (the immediate) down into slot 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}
15010
/* Assemble a Neon-syntax round-to-zero VCVT as the corresponding VFP
   "...z" opcode (the ZN column of CVT_FLAVOUR_VAR).  Silently does
   nothing when no round-to-zero VFP form exists for the flavour.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15027
/* Encode an FP-ARMv8 VCVT{A,N,P,M} (float to integer with directed
   rounding).  Only the four float-to-int flavours are valid; MODE selects
   the rounding-mode field.  The instruction is unconditional (0xf
   condition), hence the OUTSIDE_IT_INSN check.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz: double (1) vs single (0) source; op: signed (1) vs unsigned (0).  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Rounding-mode field value.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;	/* Unconditional encoding.  */
  inst.is_neon = TRUE;
}
15078
/* Common worker for the whole VCVT family.  Dispatches on operand shape:
   VFP forms are delegated to do_vfp_nsyn_cvt / do_vfp_nsyn_cvt_fpv8 /
   do_vfp_nsyn_cvtz; Neon fixed-point (DDI/QQI), integer/float (DD/QQ)
   and half-precision (QD/DQ) forms are encoded here directly.  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (float <-> fixed with #fbits).  */
	unsigned immbits;
	/* Opcode bits per flavour (s32_f32, u32_f32, f32_s32, f32_u32).
	   NOTE(review): enctab has only 4 entries but flavour is merely
	   checked against neon_cvt_flavour_invalid here; presumably the
	   type check restricts DDI/QQI shapes to the first four flavours
	   -- confirm.  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The fraction-bits field holds 64 - fbits, i.e. 32 + (32 - imm);
	   the constant 32 below supplies the low part, bit 21 the rest.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 directed-rounding VCVT{A,N,P,M}: unconditional Neon
	     encoding, rounding mode in bits 8-9.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Plain integer <-> float conversion.  Opcode bits per
	       flavour, same ordering caveat as the enctab above.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* Base opcodes for VCVT f16<->f32 (narrowing vs widening).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15227
/* VCVTR: convert using the x (VCVTR) mode.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15233
/* VCVT: convert using round-to-zero mode.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15239
/* VCVTA: directed rounding, "a" mode.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15245
/* VCVTN: directed rounding, "n" mode.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15251
/* VCVTP: directed rounding, "p" mode.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15257
/* VCVTM: directed rounding, "m" mode.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15263
/* Encode VCVTT/VCVTB once the direction is known.  T selects the top
   (T) vs bottom (B) half; TO means converting *to* half precision;
   IS_DOUBLE selects the f64 variants (FP-ARMv8 only, hence the feature
   mark).  Register classes follow the direction: the double register is
   the destination when converting from f16, the source when converting
   to f16.  */
static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;   /* Direction bit.  */
  inst.instruction |= t ? 0x80 : 0;       /* Top/bottom half.  */
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15279
15280 static void
15281 do_neon_cvttb_1 (bfd_boolean t)
15282 {
15283 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
15284
15285 if (rs == NS_NULL)
15286 return;
15287 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15288 {
15289 inst.error = NULL;
15290 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15291 }
15292 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15293 {
15294 inst.error = NULL;
15295 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15296 }
15297 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15298 {
15299 inst.error = NULL;
15300 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15301 }
15302 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15303 {
15304 inst.error = NULL;
15305 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15306 }
15307 else
15308 return;
15309 }
15310
/* VCVTB: bottom-half f16 conversion.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15316
15317
/* VCVTT: top-half f16 conversion.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15323
/* Encode the immediate forms of VMOV/VMVN.  The 64-bit immediate is
   split across operands[1].imm (low 32 bits) and operands[1].reg (high
   32 bits, when regisimm is set).  If the immediate cannot be encoded
   directly, the bits are inverted and the opcode flipped between
   VMOV and VMVN before trying again.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the OP bit to reflect any VMOV<->VMVN flip above.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
  inst.instruction |= cmode << 8;                         /* Encoding mode.  */

  neon_write_immbits (immbits);
}
15375
/* Assemble VMVN: register form (two-register bitwise NOT) or immediate
   form (delegated to neon_move_immediate).  */
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
      inst.instruction |= LOW4 (inst.operands[1].reg);        /* Vm.  */
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
      inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15398
15399 /* Encode instructions of form:
15400
15401 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15402 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15403
15404 static void
15405 neon_mixed_length (struct neon_type_el et, unsigned size)
15406 {
15407 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15408 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15409 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15410 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15411 inst.instruction |= LOW4 (inst.operands[2].reg);
15412 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15413 inst.instruction |= (et.type == NT_unsigned) << 24;
15414 inst.instruction |= neon_logbits (size) << 20;
15415
15416 neon_dp_fixup (&inst);
15417 }
15418
/* Assemble lengthening three-register operations (Qd, Dn, Dm).  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15427
/* Assemble VABAL (absolute difference and accumulate, long).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15435
/* Common worker for long multiply/MAC operations that have both a
   by-scalar form (Qd, Dn, Dm[x]) and a three-register form (Qd, Dn, Dm).
   REGTYPES/SCALARTYPES give the allowed element types for each form.
   NOTE(review): REGTYPES is used for the scalar branch and SCALARTYPES
   for the register branch -- the parameter names look swapped relative
   to their use; behaviour matches the callers, so presumably only the
   naming is off.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      /* U bit follows the signedness of the element type here.  */
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15454
/* Assemble VMLAL/VMLSL/VMULL-style long MACs, scalar or register form.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15460
/* Assemble widening operations (Qd, Qn, Dm), e.g. VADDW/VSUBW.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15468
/* Assemble narrowing operations (Dd, Qn, Qm), e.g. VADDHN/VSUBHN.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
15479
/* Assemble VQDMULL/VQDMLAL/VQDMLSL (signed 16/32 only), scalar or
   register form.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15485
/* Assemble VMULL.  The scalar form shares the long-MAC path; the
   register form additionally allows polynomial types P8 and (with the
   ARMv8 crypto extension) P64.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Setting size to 32 yields the 0b10 size encoding for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15517
/* Assemble VEXT (byte-wise extract).  The element-typed index operand
   is scaled to a byte offset, which must fit the register width
   (16 bytes for Q, 8 for D).  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index to a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;  /* Vn.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;    /* N.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);        /* Vm.  */
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;    /* M.  */
  inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
  inst.instruction |= imm << 8;                           /* Byte offset.  */

  neon_dp_fixup (&inst);
}
15539
/* Assemble VREV16/VREV32/VREV64.  The element size must be strictly
   smaller than the reversal-region width implied by the opcode.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* Which VREV variant this is, from bits 8-7 of the base opcode.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15556
/* Assemble VDUP: either duplicate a scalar (Dm[x]) across a vector, or
   duplicate an ARM core register across the lanes of a vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      /* The imm4 field is <index:1..3><size-one-hot:1..3 bits>.  */
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
      inst.instruction |= LOW4 (dm);                          /* Vm.  */
      inst.instruction |= HI1 (dm) << 5;                      /* M.  */
      inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* b:e size field (bits 22 and 5).  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;  /* Rt.  */
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;  /* Vd.  */
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;    /* D.  */
      inst.instruction |= neon_quad (rs) << 21;               /* Q.  */
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15607
15608 /* VMOV has particularly many variations. It can be one of:
15609 0. VMOV<c><q> <Qd>, <Qm>
15610 1. VMOV<c><q> <Dd>, <Dm>
15611 (Register operations, which are VORR with Rm = Rn.)
15612 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15613 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15614 (Immediate loads.)
15615 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15616 (ARM register to scalar.)
15617 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15618 (Two ARM registers to vector.)
15619 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15620 (Scalar to ARM register.)
15621 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15622 (Vector to two ARM registers.)
15623 8. VMOV.F32 <Sd>, <Sm>
15624 9. VMOV.F64 <Dd>, <Dm>
15625 (VFP register moves.)
15626 10. VMOV.F32 <Sd>, #imm
15627 11. VMOV.F64 <Dd>, #imm
15628 (VFP float immediate load.)
15629 12. VMOV <Rd>, <Sm>
15630 (VFP single to ARM reg.)
15631 13. VMOV <Sd>, <Rm>
15632 (ARM reg to VFP single.)
15633 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15634 (Two ARM regs to two VFP singles.)
15635 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15636 (Two VFP singles to two ARM regs.)
15637
15638 These cases can be disambiguated using neon_select_shape, except cases 1/9
15639 and 3/11 which depend on the operand type too.
15640
15641 All the encoded bits are hardcoded by this function.
15642
15643 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15644 Cases 5, 7 may be used with VFPv2 and above.
15645
15646 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15647 can specify a type where it doesn't make sense to, and is ignored). */
15648
/* Assemble all the VMOV forms enumerated in the comment above.  The
   shape returned by neon_select_shape picks the case; NS_DD and NS_DI
   additionally inspect the element type to separate the VFP
   double-precision forms (cases 9 and 11) from the Neon forms
   (cases 1 and 3).  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes the source register in both Rn and Rm.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	/* Non-32-bit element sizes require full Neon, not just VFP.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* bcdebits holds the opc1:opc2 bits encoding size and index.  */
	switch (et.size)
	  {
	  case 8: bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* abcdebits holds the U:opc1:opc2 bits encoding signedness,
	   size and index.  */
	switch (et.size)
	  {
	  case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only "quarter-precision-expressible" floats fit the imm8 field.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
15883
15884 static void
15885 do_neon_rshift_round_imm (void)
15886 {
15887 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15888 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15889 int imm = inst.operands[2].imm;
15890
15891 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15892 if (imm == 0)
15893 {
15894 inst.operands[2].present = 0;
15895 do_neon_mov ();
15896 return;
15897 }
15898
15899 constraint (imm < 1 || (unsigned)imm > et.size,
15900 _("immediate out of range for shift"));
15901 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15902 et.size - imm);
15903 }
15904
15905 static void
15906 do_neon_movl (void)
15907 {
15908 struct neon_type_el et = neon_check_type (2, NS_QD,
15909 N_EQK | N_DBL, N_SU_32 | N_KEY);
15910 unsigned sizebits = et.size >> 3;
15911 inst.instruction |= sizebits << 19;
15912 neon_two_same (0, et.type == NT_unsigned, -1);
15913 }
15914
15915 static void
15916 do_neon_trn (void)
15917 {
15918 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15919 struct neon_type_el et = neon_check_type (2, rs,
15920 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15921 NEON_ENCODE (INTEGER, inst);
15922 neon_two_same (neon_quad (rs), 1, et.size);
15923 }
15924
15925 static void
15926 do_neon_zip_uzp (void)
15927 {
15928 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15929 struct neon_type_el et = neon_check_type (2, rs,
15930 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15931 if (rs == NS_DD && et.size == 32)
15932 {
15933 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15934 inst.instruction = N_MNEM_vtrn;
15935 do_neon_trn ();
15936 return;
15937 }
15938 neon_two_same (neon_quad (rs), 1, et.size);
15939 }
15940
15941 static void
15942 do_neon_sat_abs_neg (void)
15943 {
15944 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15945 struct neon_type_el et = neon_check_type (2, rs,
15946 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15947 neon_two_same (neon_quad (rs), 1, et.size);
15948 }
15949
15950 static void
15951 do_neon_pair_long (void)
15952 {
15953 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15954 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15955 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
15956 inst.instruction |= (et.type == NT_unsigned) << 7;
15957 neon_two_same (neon_quad (rs), 1, et.size);
15958 }
15959
15960 static void
15961 do_neon_recip_est (void)
15962 {
15963 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15964 struct neon_type_el et = neon_check_type (2, rs,
15965 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15966 inst.instruction |= (et.type == NT_float) << 8;
15967 neon_two_same (neon_quad (rs), 1, et.size);
15968 }
15969
15970 static void
15971 do_neon_cls (void)
15972 {
15973 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15974 struct neon_type_el et = neon_check_type (2, rs,
15975 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15976 neon_two_same (neon_quad (rs), 1, et.size);
15977 }
15978
15979 static void
15980 do_neon_clz (void)
15981 {
15982 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15983 struct neon_type_el et = neon_check_type (2, rs,
15984 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15985 neon_two_same (neon_quad (rs), 1, et.size);
15986 }
15987
15988 static void
15989 do_neon_cnt (void)
15990 {
15991 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15992 struct neon_type_el et = neon_check_type (2, rs,
15993 N_EQK | N_INT, N_8 | N_KEY);
15994 neon_two_same (neon_quad (rs), 1, et.size);
15995 }
15996
15997 static void
15998 do_neon_swp (void)
15999 {
16000 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16001 neon_two_same (neon_quad (rs), 1, -1);
16002 }
16003
16004 static void
16005 do_neon_tbl_tbx (void)
16006 {
16007 unsigned listlenbits;
16008 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16009
16010 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16011 {
16012 first_error (_("bad list length for table lookup"));
16013 return;
16014 }
16015
16016 listlenbits = inst.operands[1].imm - 1;
16017 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16018 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16019 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16020 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16021 inst.instruction |= LOW4 (inst.operands[2].reg);
16022 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16023 inst.instruction |= listlenbits << 8;
16024
16025 neon_dp_fixup (&inst);
16026 }
16027
16028 static void
16029 do_neon_ldm_stm (void)
16030 {
16031 /* P, U and L bits are part of bitmask. */
16032 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16033 unsigned offsetbits = inst.operands[1].imm * 2;
16034
16035 if (inst.operands[1].issingle)
16036 {
16037 do_vfp_nsyn_ldm_stm (is_dbmode);
16038 return;
16039 }
16040
16041 constraint (is_dbmode && !inst.operands[0].writeback,
16042 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16043
16044 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16045 _("register list must contain at least 1 and at most 16 "
16046 "registers"));
16047
16048 inst.instruction |= inst.operands[0].reg << 16;
16049 inst.instruction |= inst.operands[0].writeback << 21;
16050 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16051 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16052
16053 inst.instruction |= offsetbits;
16054
16055 do_vfp_cond_or_thumb ();
16056 }
16057
16058 static void
16059 do_neon_ldr_str (void)
16060 {
16061 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16062
16063 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16064 And is UNPREDICTABLE in thumb mode. */
16065 if (!is_ldr
16066 && inst.operands[1].reg == REG_PC
16067 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16068 {
16069 if (thumb_mode)
16070 inst.error = _("Use of PC here is UNPREDICTABLE");
16071 else if (warn_on_deprecated)
16072 as_warn (_("Use of PC here is deprecated"));
16073 }
16074
16075 if (inst.operands[0].issingle)
16076 {
16077 if (is_ldr)
16078 do_vfp_nsyn_opcode ("flds");
16079 else
16080 do_vfp_nsyn_opcode ("fsts");
16081 }
16082 else
16083 {
16084 if (is_ldr)
16085 do_vfp_nsyn_opcode ("fldd");
16086 else
16087 do_vfp_nsyn_opcode ("fstd");
16088 }
16089 }
16090
16091 /* "interleave" version also handles non-interleaving register VLD1/VST1
16092 instructions. */
16093
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1	 /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate a :64/:128/:256 alignment specifier into the two-bit
     "align" field, validating it against the register list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements exist only for VLD1/VST1 (bits [9:8] == 0).  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16159
16160 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16161 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16162 otherwise. The variable arguments are a list of pairs of legal (size, align)
16163 values, terminated with -1. */
16164
16165 static int
16166 neon_alignment_bit (int size, int align, int *do_align, ...)
16167 {
16168 va_list ap;
16169 int result = FAIL, thissize, thisalign;
16170
16171 if (!inst.operands[1].immisalign)
16172 {
16173 *do_align = 0;
16174 return SUCCESS;
16175 }
16176
16177 va_start (ap, do_align);
16178
16179 do
16180 {
16181 thissize = va_arg (ap, int);
16182 if (thissize == -1)
16183 break;
16184 thisalign = va_arg (ap, int);
16185
16186 if (size == thissize && align == thisalign)
16187 result = SUCCESS;
16188 }
16189 while (result != SUCCESS);
16190
16191 va_end (ap);
16192
16193 if (result == SUCCESS)
16194 *do_align = 1;
16195 else
16196 first_error (_("unsupported alignment for instruction"));
16197
16198 return result;
16199 }
16200
/* Encode the single-lane forms of VLD<n>/VST<n> (load/store of one
   element to/from one lane of each list register).  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16285
16286 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16287
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the bitmask hold <n> of VLD<n>, minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a distinct size
	   field encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
16359
16360 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16361 apart from bits [11:4]. */
16362
16363 static void
16364 do_neon_ldx_stx (void)
16365 {
16366 if (inst.operands[1].isreg)
16367 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16368
16369 switch (NEON_LANE (inst.operands[0].imm))
16370 {
16371 case NEON_INTERLEAVE_LANES:
16372 NEON_ENCODE (INTERLV, inst);
16373 do_neon_ld_st_interleave ();
16374 break;
16375
16376 case NEON_ALL_LANES:
16377 NEON_ENCODE (DUP, inst);
16378 if (inst.instruction == N_INV)
16379 {
16380 first_error ("only loads support such operands");
16381 break;
16382 }
16383 do_neon_ld_dup ();
16384 break;
16385
16386 default:
16387 NEON_ENCODE (LANE, inst);
16388 do_neon_ld_st_lane ();
16389 }
16390
16391 /* L bit comes from bit mask. */
16392 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16393 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16394 inst.instruction |= inst.operands[1].reg << 16;
16395
16396 if (inst.operands[1].postind)
16397 {
16398 int postreg = inst.operands[1].imm & 0xf;
16399 constraint (!inst.operands[1].immisreg,
16400 _("post-index must be a register"));
16401 constraint (postreg == 0xd || postreg == 0xf,
16402 _("bad register for post-index"));
16403 inst.instruction |= postreg;
16404 }
16405 else
16406 {
16407 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16408 constraint (inst.reloc.exp.X_op != O_constant
16409 || inst.reloc.exp.X_add_number != 0,
16410 BAD_ADDR_MODE);
16411
16412 if (inst.operands[1].writeback)
16413 {
16414 inst.instruction |= 0xd;
16415 }
16416 else
16417 inst.instruction |= 0xf;
16418 }
16419
16420 if (thumb_mode)
16421 inst.instruction |= 0xf9000000;
16422 else
16423 inst.instruction |= 0xf4000000;
16424 }
16425
16426 /* FP v8. */
16427 static void
16428 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16429 {
16430 NEON_ENCODE (FPV8, inst);
16431
16432 if (rs == NS_FFF)
16433 do_vfp_sp_dyadic ();
16434 else
16435 do_vfp_dp_rd_rn_rm ();
16436
16437 if (rs == NS_DDD)
16438 inst.instruction |= 0x100;
16439
16440 inst.instruction |= 0xf0000000;
16441 }
16442
16443 static void
16444 do_vsel (void)
16445 {
16446 set_it_insn_type (OUTSIDE_IT_INSN);
16447
16448 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16449 first_error (_("invalid instruction shape"));
16450 }
16451
16452 static void
16453 do_vmaxnm (void)
16454 {
16455 set_it_insn_type (OUTSIDE_IT_INSN);
16456
16457 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16458 return;
16459
16460 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16461 return;
16462
16463 neon_dyadic_misc (NT_untyped, N_F32, 0);
16464 }
16465
/* Assemble the VRINT family; MODE selects the rounding-mode variant.
   The VFP (scalar) encoding is used when the type checks as F32/F64
   with VFP operands, otherwise the Neon (vector) encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m rounding-mode variants may not appear in an IT
	 block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	/* Mode 'r' has no Neon vector form.  */
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16542
/* VRINTX: rounding-mode variant 'x' (current mode, signalling
   inexact).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ: rounding-mode variant 'z' (towards zero).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR: rounding-mode variant 'r' (current FPSCR mode).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA: rounding-mode variant 'a' (ties away from zero).  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN: rounding-mode variant 'n' (ties to even).  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP: rounding-mode variant 'p' (towards plus infinity).  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM: rounding-mode variant 'm' (towards minus infinity).  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16584
16585 /* Crypto v1 instructions. */
16586 static void
16587 do_crypto_2op_1 (unsigned elttype, int op)
16588 {
16589 set_it_insn_type (OUTSIDE_IT_INSN);
16590
16591 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16592 == NT_invtype)
16593 return;
16594
16595 inst.error = NULL;
16596
16597 NEON_ENCODE (INTEGER, inst);
16598 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16599 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16600 inst.instruction |= LOW4 (inst.operands[1].reg);
16601 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16602 if (op != -1)
16603 inst.instruction |= op << 6;
16604
16605 if (thumb_mode)
16606 inst.instruction |= 0xfc000000;
16607 else
16608 inst.instruction |= 0xf0000000;
16609 }
16610
16611 static void
16612 do_crypto_3op_1 (int u, int op)
16613 {
16614 set_it_insn_type (OUTSIDE_IT_INSN);
16615
16616 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16617 N_32 | N_UNT | N_KEY).type == NT_invtype)
16618 return;
16619
16620 inst.error = NULL;
16621
16622 NEON_ENCODE (INTEGER, inst);
16623 neon_three_same (1, u, 8 << op);
16624 }
16625
/* AESE: two-operand crypto op, 8-bit elements, op 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: two-operand crypto op, 8-bit elements, op 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: two-operand crypto op, 8-bit elements, op 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: two-operand crypto op, 8-bit elements, op 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C: three-operand crypto op, U=0, op 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: three-operand crypto op, U=0, op 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: three-operand crypto op, U=0, op 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: three-operand crypto op, U=0, op 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: three-operand crypto op, U=1, op 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: three-operand crypto op, U=1, op 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: three-operand crypto op, U=1, op 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: two-operand crypto op, 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: two-operand crypto op, 32-bit elements, op 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: two-operand crypto op, 32-bit elements, op 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16709
16710 static void
16711 do_crc32_1 (unsigned int poly, unsigned int sz)
16712 {
16713 unsigned int Rd = inst.operands[0].reg;
16714 unsigned int Rn = inst.operands[1].reg;
16715 unsigned int Rm = inst.operands[2].reg;
16716
16717 set_it_insn_type (OUTSIDE_IT_INSN);
16718 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16719 inst.instruction |= LOW4 (Rn) << 16;
16720 inst.instruction |= LOW4 (Rm);
16721 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16722 inst.instruction |= poly << (thumb_mode ? 20 : 9);
16723
16724 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16725 as_warn (UNPRED_REG ("r15"));
16726 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16727 as_warn (UNPRED_REG ("r13"));
16728 }
16729
/* CRC32B: CRC32 over a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC32 over a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC32 over a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC32C over a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC32C over a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC32C over a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
16765
16766 \f
16767 /* Overall per-instruction processing. */
16768
16769 /* We need to be able to fix up arbitrary expressions in some statements.
16770 This is so that we can handle symbols that are an arbitrary distance from
16771 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16772 which returns part of an address in a form which will be valid for
16773 a data instruction. We do this by pushing the expression into a symbol
16774 in the expr_section, and creating a fix for that. */
16775
/* Create a fixup for SIZE bytes at WHERE in FRAG, resolving EXP with
   relocation type RELOC (a bfd_reloc_code_real passed as int).  PC_REL
   is non-zero for pc-relative fixes.  Expressions that the generic fix
   machinery cannot represent are wrapped in synthetic symbols first.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression as sym+0 so the O_symbol case below
	     can emit the fix.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: fold the expression into a synthetic
	 symbol and fix against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
16829
/* Create a frag for an instruction requiring relaxation.  The frag is
   seeded with the 16-bit (Thumb) form of the instruction; relaxation may
   later grow it to the 32-bit form (INSN_SIZE).  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Decompose the relocation expression into a symbol + offset pair
     acceptable to frag_var.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
16861
16862 /* Write a 32-bit thumb instruction to buf. */
16863 static void
16864 put_thumb32_insn (char * buf, unsigned long insn)
16865 {
16866 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16867 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16868 }
16869
/* Emit the instruction currently described by the global `inst' into the
   output.  STR is the source line, used only for diagnostics.  Handles
   errors, relaxable instructions, Thumb-32 halfword ordering, doubled
   encodings, fixups and DWARF line info.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Size not yet known; let relaxation machinery place it.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 8-byte encodings are the 4-byte pattern written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
16916
16917 static char *
16918 output_it_inst (int cond, int mask, char * to)
16919 {
16920 unsigned long instruction = 0xbf00;
16921
16922 mask &= 0xf;
16923 instruction |= mask;
16924 instruction |= cond << 4;
16925
16926 if (to == NULL)
16927 {
16928 to = frag_more (2);
16929 #ifdef OBJ_ELF
16930 dwarf2_emit_insn (2);
16931 #endif
16932 }
16933
16934 md_number_to_chars (to, instruction, 2);
16935
16936 return to;
16937 }
16938
/* Tag values used in struct asm_opcode's tag field.  They describe how a
   mnemonic interacts with a condition-code affix: whether it accepts one
   at all, and if so whether as a suffix or an infix (and at what
   position).  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
16972
16973 /* Subroutine of md_assemble, responsible for looking up the primary
16974 opcode from the mnemonic the user wrote. STR points to the
16975 beginning of the mnemonic.
16976
16977 This is not simply a hash table lookup, because of conditional
16978 variants. Most instructions have conditional variants, which are
16979 expressed with a _conditional affix_ to the mnemonic. If we were
16980 to encode each conditional variant as a literal string in the opcode
16981 table, it would have approximately 20,000 entries.
16982
16983 Most mnemonics take this affix as a suffix, and in unified syntax,
16984 'most' is upgraded to 'all'. However, in the divided syntax, some
16985 instructions take the affix as an infix, notably the s-variants of
16986 the arithmetic instructions. Of those instructions, all but six
16987 have the infix appear after the third character of the mnemonic.
16988
16989 Accordingly, the algorithm for looking up primary opcodes given
16990 an identifier is:
16991
16992 1. Look up the identifier in the opcode table.
16993 If we find a match, go to step U.
16994
16995 2. Look up the last two characters of the identifier in the
16996 conditions table. If we find a match, look up the first N-2
16997 characters of the identifier in the opcode table. If we
16998 find a match, go to step CE.
16999
17000 3. Look up the fourth and fifth characters of the identifier in
17001 the conditions table. If we find a match, extract those
17002 characters from the identifier, and look up the remaining
17003 characters in the opcode table. If we find a match, go
17004 to step CM.
17005
17006 4. Fail.
17007
17008 U. Examine the tag field of the opcode structure, in case this is
17009 one of the six instructions with its conditional infix in an
17010 unusual place. If it is, the tag tells us where to find the
17011 infix; look it up in the conditions table and set inst.cond
17012 accordingly. Otherwise, this is an unconditional instruction.
17013 Again set inst.cond accordingly. Return the opcode structure.
17014
17015 CE. Examine the tag field to make sure this is an instruction that
17016 should receive a conditional suffix. If it is not, fail.
17017 Otherwise, set inst.cond from the suffix we already looked up,
17018 and return the opcode structure.
17019
17020 CM. Examine the tag field to make sure this is an instruction that
17021 should receive a conditional infix after the third character.
17022 If it is not, fail. Otherwise, undo the edits to the current
17023 line of input and proceed as for case CE. */
17024
17025 static const struct asm_opcode *
17026 opcode_lookup (char **str)
17027 {
17028 char *end, *base;
17029 char *affix;
17030 const struct asm_opcode *opcode;
17031 const struct asm_cond *cond;
17032 char save[2];
17033
17034 /* Scan up to the end of the mnemonic, which must end in white space,
17035 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17036 for (base = end = *str; *end != '\0'; end++)
17037 if (*end == ' ' || *end == '.')
17038 break;
17039
17040 if (end == base)
17041 return NULL;
17042
17043 /* Handle a possible width suffix and/or Neon type suffix. */
17044 if (end[0] == '.')
17045 {
17046 int offset = 2;
17047
17048 /* The .w and .n suffixes are only valid if the unified syntax is in
17049 use. */
17050 if (unified_syntax && end[1] == 'w')
17051 inst.size_req = 4;
17052 else if (unified_syntax && end[1] == 'n')
17053 inst.size_req = 2;
17054 else
17055 offset = 0;
17056
17057 inst.vectype.elems = 0;
17058
17059 *str = end + offset;
17060
17061 if (end[offset] == '.')
17062 {
17063 /* See if we have a Neon type suffix (possible in either unified or
17064 non-unified ARM syntax mode). */
17065 if (parse_neon_type (&inst.vectype, str) == FAIL)
17066 return NULL;
17067 }
17068 else if (end[offset] != '\0' && end[offset] != ' ')
17069 return NULL;
17070 }
17071 else
17072 *str = end;
17073
17074 /* Look for unaffixed or special-case affixed mnemonic. */
17075 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17076 end - base);
17077 if (opcode)
17078 {
17079 /* step U */
17080 if (opcode->tag < OT_odd_infix_0)
17081 {
17082 inst.cond = COND_ALWAYS;
17083 return opcode;
17084 }
17085
17086 if (warn_on_deprecated && unified_syntax)
17087 as_warn (_("conditional infixes are deprecated in unified syntax"));
17088 affix = base + (opcode->tag - OT_odd_infix_0);
17089 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17090 gas_assert (cond);
17091
17092 inst.cond = cond->value;
17093 return opcode;
17094 }
17095
17096 /* Cannot have a conditional suffix on a mnemonic of less than two
17097 characters. */
17098 if (end - base < 3)
17099 return NULL;
17100
17101 /* Look for suffixed mnemonic. */
17102 affix = end - 2;
17103 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17104 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17105 affix - base);
17106 if (opcode && cond)
17107 {
17108 /* step CE */
17109 switch (opcode->tag)
17110 {
17111 case OT_cinfix3_legacy:
17112 /* Ignore conditional suffixes matched on infix only mnemonics. */
17113 break;
17114
17115 case OT_cinfix3:
17116 case OT_cinfix3_deprecated:
17117 case OT_odd_infix_unc:
17118 if (!unified_syntax)
17119 return 0;
17120 /* else fall through */
17121
17122 case OT_csuffix:
17123 case OT_csuffixF:
17124 case OT_csuf_or_in3:
17125 inst.cond = cond->value;
17126 return opcode;
17127
17128 case OT_unconditional:
17129 case OT_unconditionalF:
17130 if (thumb_mode)
17131 inst.cond = cond->value;
17132 else
17133 {
17134 /* Delayed diagnostic. */
17135 inst.error = BAD_COND;
17136 inst.cond = COND_ALWAYS;
17137 }
17138 return opcode;
17139
17140 default:
17141 return NULL;
17142 }
17143 }
17144
17145 /* Cannot have a usual-position infix on a mnemonic of less than
17146 six characters (five would be a suffix). */
17147 if (end - base < 6)
17148 return NULL;
17149
17150 /* Look for infixed mnemonic in the usual position. */
17151 affix = base + 3;
17152 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17153 if (!cond)
17154 return NULL;
17155
17156 memcpy (save, affix, 2);
17157 memmove (affix, affix + 2, (end - affix) - 2);
17158 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17159 (end - base) - 2);
17160 memmove (affix + 2, affix, (end - affix) - 2);
17161 memcpy (affix, save, 2);
17162
17163 if (opcode
17164 && (opcode->tag == OT_cinfix3
17165 || opcode->tag == OT_cinfix3_deprecated
17166 || opcode->tag == OT_csuf_or_in3
17167 || opcode->tag == OT_cinfix3_legacy))
17168 {
17169 /* Step CM. */
17170 if (warn_on_deprecated && unified_syntax
17171 && (opcode->tag == OT_cinfix3
17172 || opcode->tag == OT_cinfix3_deprecated))
17173 as_warn (_("conditional infixes are deprecated in unified syntax"));
17174
17175 inst.cond = cond->value;
17176 return opcode;
17177 }
17178
17179 return NULL;
17180 }
17181
17182 /* This function generates an initial IT instruction, leaving its block
17183 virtually open for the new instructions. Eventually,
17184 the mask will be updated by now_it_add_mask () each time
17185 a new instruction needs to be included in the IT block.
17186 Finally, the block is closed with close_automatic_it_block ().
17187 The block closure can be requested either from md_assemble (),
17188 a tencode (), or due to a label hook. */
17189
17190 static void
17191 new_automatic_it_block (int cond)
17192 {
17193 now_it.state = AUTOMATIC_IT_BLOCK;
17194 now_it.mask = 0x18;
17195 now_it.cc = cond;
17196 now_it.block_length = 1;
17197 mapping_state (MAP_THUMB);
17198 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17199 now_it.warn_deprecated = FALSE;
17200 now_it.insn_cond = TRUE;
17201 }
17202
17203 /* Close an automatic IT block.
17204 See comments in new_automatic_it_block (). */
17205
static void
close_automatic_it_block (void)
{
  /* A mask of 0x10 with no pending slots marks the block as complete;
     it_fsm_post_encode () then transitions to OUTSIDE_IT_BLOCK.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17212
17213 /* Update the mask of the current automatically-generated IT
17214 instruction. See comments in new_automatic_it_block (). */
17215
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  /* Low bit of COND selects the then/else flavour of this slot.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this instruction's bit at its position in the mask...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ...and move the terminating 1 bit down one position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Patch the already-emitted IT instruction in place (now_it.insn was
     saved by new_automatic_it_block ()).  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17236
17237 /* The IT blocks handling machinery is accessed through the these functions:
17238 it_fsm_pre_encode () from md_assemble ()
17239 set_it_insn_type () optional, from the tencode functions
17240 set_it_insn_type_last () ditto
17241 in_it_block () ditto
17242 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
17244
17245 Rationale:
17246 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17247 initializing the IT insn type with a generic initial value depending
17248 on the inst.condition.
17249 2) During the tencode function, two things may happen:
17250 a) The tencode function overrides the IT insn type by
17251 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17252 b) The tencode function queries the IT block state by
17253 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17254
17255 Both set_it_insn_type and in_it_block run the internal FSM state
17256 handling function (handle_it_state), because: a) setting the IT insn
17257 type may incur in an invalid state (exiting the function),
17258 and b) querying the state requires the FSM to be updated.
17259 Specifically we want to avoid creating an IT block for conditional
17260 branches, so it_fsm_pre_encode is actually a guess and we can't
17261 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17263 Because of this, if set_it_insn_type and in_it_block have to be used,
17264 set_it_insn_type has to be called first.
17265
17266 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17267 determines the insn IT type depending on the inst.cond code.
17268 When a tencode () routine encodes an instruction that can be
17269 either outside an IT block, or, in the case of being inside, has to be
17270 the last one, set_it_insn_type_last () will determine the proper
17271 IT instruction type based on the inst.cond code. Otherwise,
17272 set_it_insn_type can be called for overriding that logic or
17273 for covering other cases.
17274
17275 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17277 still queried. Instead, if the FSM determines that the state should
17278 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17279 after the tencode () function: that's what it_fsm_post_encode () does.
17280
17281 Since in_it_block () calls the state handling function to get an
17282 updated state, an error may occur (due to invalid insns combination).
17283 In that case, inst.error is set.
17284 Therefore, inst.error has to be checked after the execution of
17285 the tencode () routine.
17286
17287 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17288 any pending state change (if any) that didn't take place in
17289 handle_it_state () as explained above. */
17290
17291 static void
17292 it_fsm_pre_encode (void)
17293 {
17294 if (inst.cond != COND_ALWAYS)
17295 inst.it_insn_type = INSIDE_IT_INSN;
17296 else
17297 inst.it_insn_type = OUTSIDE_IT_INSN;
17298
17299 now_it.state_handled = 0;
17300 }
17301
17302 /* IT state FSM handling function. */
17303
/* Run one step of the IT FSM: combine the current block state
   (now_it.state) with the type of the instruction being assembled
   (inst.it_insn_type).  May open, extend or close an automatic IT
   block, or validate the insn against a manually written one.
   Returns SUCCESS, or FAIL with inst.error set.  */
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block, and implicit
		     IT generation is not enabled: error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and unless
		 this was an if-last insn, start a fresh one.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and starts a
	     manual one.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17465
/* An instruction-class pattern for the ARMv8 IT-block deprecation
   check performed in it_fsm_post_encode ().  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits identifying the class.  */
  unsigned long mask;		/* Which opcode bits to compare.  */
  const char* description;	/* Class name used in the warning.  */
};

/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
17487
/* Finish IT processing for the instruction just encoded: run the FSM if
   the tencode routine did not, emit ARMv8 deprecation warnings for
   conditional insns in IT blocks, and close the block once its last
   slot has been consumed.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Warn once per block about ARMv8-deprecated IT usage.  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_warn (_("IT blocks containing 32-bit Thumb instructions are "
		     "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_warn (_("IT blocks containing 16-bit Thumb instructions "
			     "of the following class are deprecated in ARMv8: "
			     "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_warn (_("IT blocks containing more than one conditional "
		     "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Mask 0x10 means every slot has been used; leave the block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17541
17542 static void
17543 force_automatic_it_block_close (void)
17544 {
17545 if (now_it.state == AUTOMATIC_IT_BLOCK)
17546 {
17547 close_automatic_it_block ();
17548 now_it.state = OUTSIDE_IT_BLOCK;
17549 now_it.mask = 0;
17550 }
17551 }
17552
/* Return non-zero if we are currently inside an IT block (manual or
   automatic).  Runs the FSM first if this instruction has not been
   processed yet, so the answer reflects the updated state; as a side
   effect this may set inst.error (see the comment block above).  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
17561
/* Main entry point for assembling one ARM/Thumb source line STR.
   Looks up the mnemonic, validates it against the selected CPU and
   mode, parses the operands, runs the opcode's encoder and the IT FSM,
   records architecture-feature usage, and finally emits the encoded
   instruction via output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicit require narrow instructions on Thumb-1.
		    This avoids relaxation accidentally introducing Thumb-2
		     instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Deduce the insn size (2 or 4 bytes) from the encoded value
	     and check it against any .w/.n width suffix.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      /* Fill in the condition field (0xF for the unconditional-F forms).  */
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
17746
17747 static void
17748 check_it_blocks_finished (void)
17749 {
17750 #ifdef OBJ_ELF
17751 asection *sect;
17752
17753 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17754 if (seg_info (sect)->tc_segment_info_data.current_it.state
17755 == MANUAL_IT_BLOCK)
17756 {
17757 as_warn (_("section '%s' finished with an open IT block."),
17758 sect->name);
17759 }
17760 #else
17761 if (now_it.state == MANUAL_IT_BLOCK)
17762 as_warn (_("file finished with an open IT block."));
17763 #endif
17764 }
17765
17766 /* Various frobbings of labels and their addresses. */
17767
/* Start-of-line hook: reset the record of the most recently seen
   label (set by arm_frob_label) so stale labels from previous lines
   are not acted upon.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
17773
/* Target hook invoked when the label SYM is defined.

   Records SYM as the last label seen on this line, tags it as ARM or
   Thumb according to the current instruction-set mode (and as
   interworking-capable for COFF/ELF), closes any automatically
   opened IT block, optionally marks SYM as a Thumb function entry
   point (see the long comment below for why .L-local labels are
   excluded), and finally notifies the DWARF2 line-number machinery.

   label_is_thumb_function_name is presumably set by a preceding
   .thumb_func directive -- it is consumed (cleared) here.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions. This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls. Also, these labels can be used
     as part of a computed goto or switch statement. eg gcc
     can generate code that looks like this:

     ldr r2, [pc, .Laaa]
     lsl r3, r3, #2
     ldr r2, [r3, r2]
     mov pc, r2

     .Lbbb: .word .Lxxx
     .Lccc: .word .Lyyy
     ..etc...
     .Laaa: .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set. Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue. */
  /* Only mark labels that live in a code section.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set. This will allow
	 interworking between Arm and Thumb functions to work
	 correctly. */

      THUMB_SET_FUNC (sym, 1);

      /* The marking applies to one label only.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
17832
17833 bfd_boolean
17834 arm_data_in_code (void)
17835 {
17836 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17837 {
17838 *input_line_pointer = '/';
17839 input_line_pointer += 5;
17840 *input_line_pointer = 0;
17841 return TRUE;
17842 }
17843
17844 return FALSE;
17845 }
17846
17847 char *
17848 arm_canonicalize_symbol_name (char * name)
17849 {
17850 int len;
17851
17852 if (thumb_mode && (len = strlen (name)) > 5
17853 && streq (name + len - 5, "/data"))
17854 *(name + len - 5) = 0;
17855
17856 return name;
17857 }
17858 \f
/* Table of all register names defined by default. The user can
   define additional names with .req. Note that all register names
   should appear in both upper and lowercase variants. Some registers
   also have mixed-case names. */

/* REGDEF builds one reg_entry initializer: name string, encoded
   number, register type, builtin flag.  REGNUM pastes the number onto
   the prefix to form the name; REGNUM2 stores twice the number
   (used below for Neon Q registers -- presumably because Qn overlays
   D(2n); confirm against the consumers of REG_TYPE_NQ).
   REGSET expands to registers 0-15, REGSETH to 16-31.
   SPLRBANK builds the six lr/sp/spsr entries (both cases) for one
   banked processor mode at encoding offset BASE.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17889
/* The builtin register-name table.  Entries are looked up when
   parsing operands; the encoded numbers below are consumed by the
   operand-parsing/encoding code elsewhere in this file.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers. */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms. */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases. */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers. */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers. The "cr" variants are for backward
     compatibility. */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.
     NOTE(review): the 512|/768| bases, the (n<<16) field and SPSR_BIT
     appear to pack a banked-register selector into the number field;
     the exact bit layout is interpreted by the consumers of
     REG_TYPE_RNB -- confirm there before changing any value.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers. */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers. */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers. */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers. */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  (Numbers doubled by REGNUM2 via REGSET2.)  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers. */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers. */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15. */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3. */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers. */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): REGNUM2, REGSET2, REGSETH and SPLRBANK are left
   defined here -- harmless, but check they are not reused later
   before adding matching #undefs.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18007
/* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
   within psr_required_here. */
/* Every ordering of each subset of the f/s/x/c field letters is
   enumerated explicitly, so any permutation the user writes is
   accepted (e.g. "cpsr_fc" and "cpsr_cf" are equivalent).  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation. Note that "all" is no longer
     truly all possible PSR bits. */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags. */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags. */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18086
/* Table of V7M psr names.  Upper- and lowercase spellings are listed
   in pairs; the numeric field is the special-register selector
   (consumed by the MRS/MSR encoders elsewhere in this file).  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 }, {"APSR", 0 },
  {"iapsr", 1 }, {"IAPSR", 1 },
  {"eapsr", 2 }, {"EAPSR", 2 },
  {"psr", 3 }, {"PSR", 3 },
  {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
  {"ipsr", 5 }, {"IPSR", 5 },
  {"epsr", 6 }, {"EPSR", 6 },
  {"iepsr", 7 }, {"IEPSR", 7 },
  {"msp", 8 }, {"MSP", 8 },
  {"psp", 9 }, {"PSP", 9 },
  {"primask", 16}, {"PRIMASK", 16},
  {"basepri", 17}, {"BASEPRI", 17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  /* The "BASEPRI_MASK" spelling below is a historical typo, kept so
     old source still assembles.  */
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
  {"faultmask", 19}, {"FAULTMASK", 19},
  {"control", 20}, {"CONTROL", 20}
};
18107
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym of "lsl" (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
18118
/* Table of all explicit relocation names (ELF only).  Each name is
   listed in lower- and uppercase and maps to the BFD relocation type
   to emit when the user writes an explicit relocation specifier on
   an operand.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18143
/* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* The value is the 4-bit ARM condition field; for ARM-mode encodings
   it is shifted into bits 28-31 (see the "inst.cond << 28" insertion
   in the assembly path above).  Some conditions have aliases:
   hs==cs, and both ul and lo alias cc.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18163
/* UL_BARRIER emits a lower- and an uppercase entry for one barrier
   option: name, 4-bit option code, and the architecture feature set
   that must be present for the option to be accepted.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE (FEAT, 0) }, \
  { U, CODE, ARM_FEATURE (FEAT, 0) }

/* Barrier option names for DMB/DSB-style instructions.  The *ld
   (load-only) options are ARMv8 additions; the rest require the
   base barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18189
/* Table of ARM-format instructions. */

/* Macros for gluing together operand strings. N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array. */
#define OPS0() { OP_stop, }
#define OPS1(a) { OP_##a, }
#define OPS2(a,b) { OP_##a,OP_##b, }
#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  (The mnemonic-table macros below paste OPS onto the count,
   so "_3" selects OPS_3 instead of OPS3.)  */
#define OPS_1(a) { a, }
#define OPS_2(a,b) { a,b, }
#define OPS_3(a,b,c) { a,b,c, }
#define OPS_4(a,b,c,d) { a,b,c,d, }
#define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18214
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.

   Naming convention (visible in the definitions below): a leading
   uppercase 'T' takes a literal hex Thumb opcode, a leading lowercase
   't' takes a T_MNEM_xyz enumerator; 'n'/'N' prefixes are the Neon
   equivalents.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator. */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character. */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized. The ARM condition-code
   field is still 0xE. Many of the Thumb variants can be executed
   conditionally, so this is checked separately. */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function. */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field. */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above. */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character. */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions. Isomorphic between Arm and Thumb-2.  (The
   Thumb opcode is the ARM opcode with a 0xe condition nibble pasted
   on the front.)  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous. For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones. */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case). */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One table entry for the mnemonic m1<m2>m3 where m2 is a condition
   infix.  sizeof (#m2) == 1 means m2 is empty (the string is just the
   NUL), i.e. the unconditional spelling; otherwise the infix position
   (the length of m1) is encoded into the tag.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the unconditional entry plus one entry per condition
   code (including the aliases hs/ul/lo).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1, , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each. */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes. */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version. */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types. */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* do_0 expands "do_##ae" with ae == 0 to the constant 0, i.e. a null
   encoder pointer for table entries that need no encoding function.  */
#define do_0 0
18366
18367 static const struct asm_opcode insns[] =
18368 {
18369 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18370 #define THUMB_VARIANT & arm_ext_v4t
18371 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18372 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18373 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18374 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18375 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18376 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18377 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18378 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18379 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18380 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18381 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18382 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18383 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18384 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18385 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18386 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18387
18388 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18389 for setting PSR flag bits. They are obsolete in V6 and do not
18390 have Thumb equivalents. */
18391 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18392 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18393 CL("tstp", 110f000, 2, (RR, SH), cmp),
18394 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18395 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18396 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18397 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18398 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18399 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18400
18401 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18402 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18403 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18404 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18405
18406 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18407 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18408 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18409 OP_RRnpc),
18410 OP_ADDRGLDR),ldst, t_ldst),
18411 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18412
18413 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18414 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18415 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18416 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18417 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18418 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18419
18420 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18421 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18422 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18423 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18424
18425 /* Pseudo ops. */
18426 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18427 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18428 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18429 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18430
18431 /* Thumb-compatibility pseudo ops. */
18432 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18433 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18434 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18435 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18436 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18437 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18438 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18439 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18440 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18441 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18442 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18443 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18444
18445 /* These may simplify to neg. */
18446 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18447 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18448
18449 #undef THUMB_VARIANT
18450 #define THUMB_VARIANT & arm_ext_v6
18451
18452 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18453
18454 /* V1 instructions with no Thumb analogue prior to V6T2. */
18455 #undef THUMB_VARIANT
18456 #define THUMB_VARIANT & arm_ext_v6t2
18457
18458 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18459 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18460 CL("teqp", 130f000, 2, (RR, SH), cmp),
18461
18462 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18463 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18464 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18465 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18466
18467 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18468 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18469
18470 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18471 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18472
18473 /* V1 instructions with no Thumb analogue at all. */
18474 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18475 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18476
18477 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18478 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18479 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18480 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18481 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18482 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18483 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18484 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18485
18486 #undef ARM_VARIANT
18487 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18488 #undef THUMB_VARIANT
18489 #define THUMB_VARIANT & arm_ext_v4t
18490
18491 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18492 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18493
18494 #undef THUMB_VARIANT
18495 #define THUMB_VARIANT & arm_ext_v6t2
18496
18497 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18498 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18499
18500 /* Generic coprocessor instructions. */
18501 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18502 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18503 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18504 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18505 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18506 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18507 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18508
18509 #undef ARM_VARIANT
18510 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18511
18512 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18513 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18514
18515 #undef ARM_VARIANT
18516 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18517 #undef THUMB_VARIANT
18518 #define THUMB_VARIANT & arm_ext_msr
18519
18520 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18521 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18522
18523 #undef ARM_VARIANT
18524 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18525 #undef THUMB_VARIANT
18526 #define THUMB_VARIANT & arm_ext_v6t2
18527
18528 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18529 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18530 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18531 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18532 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18533 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18534 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18535 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18536
18537 #undef ARM_VARIANT
18538 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18539 #undef THUMB_VARIANT
18540 #define THUMB_VARIANT & arm_ext_v4t
18541
18542 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18543 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18544 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18545 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18546 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18547 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18548
18549 #undef ARM_VARIANT
18550 #define ARM_VARIANT & arm_ext_v4t_5
18551
18552 /* ARM Architecture 4T. */
18553 /* Note: bx (and blx) are required on V5, even if the processor does
18554 not support Thumb. */
18555 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18556
18557 #undef ARM_VARIANT
18558 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18559 #undef THUMB_VARIANT
18560 #define THUMB_VARIANT & arm_ext_v5t
18561
18562 /* Note: blx has 2 variants; the .value coded here is for
18563 BLX(2). Only this variant has conditional execution. */
18564 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18565 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18566
18567 #undef THUMB_VARIANT
18568 #define THUMB_VARIANT & arm_ext_v6t2
18569
18570 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18571 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18572 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18573 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18574 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18575 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18576 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18577 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18578
18579 #undef ARM_VARIANT
18580 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18581 #undef THUMB_VARIANT
18582 #define THUMB_VARIANT & arm_ext_v5exp
18583
18584 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18585 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18586 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18587 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18588
18589 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18590 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18591
18592 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18593 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18594 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18595 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18596
18597 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18598 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18599 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18600 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18601
18602 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18603 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18604
18605 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18606 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18607 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18608 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18609
18610 #undef ARM_VARIANT
18611 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18612 #undef THUMB_VARIANT
18613 #define THUMB_VARIANT & arm_ext_v6t2
18614
18615 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18616 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18617 ldrd, t_ldstd),
18618 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18619 ADDRGLDRS), ldrd, t_ldstd),
18620
18621 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18622 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18623
18624 #undef ARM_VARIANT
18625 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18626
18627 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18628
18629 #undef ARM_VARIANT
18630 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18631 #undef THUMB_VARIANT
18632 #define THUMB_VARIANT & arm_ext_v6
18633
18634 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18635 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18636 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18637 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18638 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18639 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18640 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18641 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18642 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18643 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18644
18645 #undef THUMB_VARIANT
18646 #define THUMB_VARIANT & arm_ext_v6t2
18647
18648 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18649 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18650 strex, t_strex),
18651 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18652 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18653
18654 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18655 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18656
18657 /* ARM V6 not included in V7M. */
18658 #undef THUMB_VARIANT
18659 #define THUMB_VARIANT & arm_ext_v6_notm
18660 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18661 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18662 UF(rfeib, 9900a00, 1, (RRw), rfe),
18663 UF(rfeda, 8100a00, 1, (RRw), rfe),
18664 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18665 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18666 UF(rfefa, 8100a00, 1, (RRw), rfe),
18667 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18668 UF(rfeed, 9900a00, 1, (RRw), rfe),
18669 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18670 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18671 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18672 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18673 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18674 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18675 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18676 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18677 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18678
 18679  /* ARM V6 not included in V7M (e.g. integer SIMD).  */
18680 #undef THUMB_VARIANT
18681 #define THUMB_VARIANT & arm_ext_v6_dsp
18682 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18683 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18684 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18685 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18686 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18687 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18688 /* Old name for QASX. */
18689 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18690 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18691 /* Old name for QSAX. */
18692 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18693 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18694 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18695 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18696 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18697 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18698 /* Old name for SASX. */
18699 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18700 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18701 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18702 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18703 /* Old name for SHASX. */
18704 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18705 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18706 /* Old name for SHSAX. */
18707 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18708 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18709 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18710 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18711 /* Old name for SSAX. */
18712 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18713 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18714 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18715 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18716 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18717 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18718 /* Old name for UASX. */
18719 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18720 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18721 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18722 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18723 /* Old name for UHASX. */
18724 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18725 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18726 /* Old name for UHSAX. */
18727 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18728 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18729 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18730 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18731 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18732 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18733 /* Old name for UQASX. */
18734 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18735 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18736 /* Old name for UQSAX. */
18737 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18738 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18739 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18740 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18741 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18742 /* Old name for USAX. */
18743 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18744 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18745 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18746 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18747 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18748 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18749 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18750 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18751 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18752 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18753 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18754 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18755 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18756 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18757 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18758 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18759 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18760 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18761 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18762 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18763 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18764 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18765 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18766 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18767 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18768 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18769 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18770 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18771 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18772 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18773 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18774 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18775 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18776 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18777
18778 #undef ARM_VARIANT
18779 #define ARM_VARIANT & arm_ext_v6k
18780 #undef THUMB_VARIANT
18781 #define THUMB_VARIANT & arm_ext_v6k
18782
18783 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18784 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18785 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18786 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18787
18788 #undef THUMB_VARIANT
18789 #define THUMB_VARIANT & arm_ext_v6_notm
18790 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18791 ldrexd, t_ldrexd),
18792 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18793 RRnpcb), strexd, t_strexd),
18794
18795 #undef THUMB_VARIANT
18796 #define THUMB_VARIANT & arm_ext_v6t2
18797 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18798 rd_rn, rd_rn),
18799 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18800 rd_rn, rd_rn),
18801 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18802 strex, t_strexbh),
18803 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18804 strex, t_strexbh),
18805 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18806
18807 #undef ARM_VARIANT
18808 #define ARM_VARIANT & arm_ext_sec
18809 #undef THUMB_VARIANT
18810 #define THUMB_VARIANT & arm_ext_sec
18811
18812 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18813
18814 #undef ARM_VARIANT
18815 #define ARM_VARIANT & arm_ext_virt
18816 #undef THUMB_VARIANT
18817 #define THUMB_VARIANT & arm_ext_virt
18818
18819 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18820 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18821
18822 #undef ARM_VARIANT
18823 #define ARM_VARIANT & arm_ext_v6t2
18824 #undef THUMB_VARIANT
18825 #define THUMB_VARIANT & arm_ext_v6t2
18826
18827 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18828 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18829 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18830 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18831
18832 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18833 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18834 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18835 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18836
18837 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18838 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18839 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18840 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18841
18842 /* Thumb-only instructions. */
18843 #undef ARM_VARIANT
18844 #define ARM_VARIANT NULL
18845 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18846 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18847
18848 /* ARM does not really have an IT instruction, so always allow it.
18849 The opcode is copied from Thumb in order to allow warnings in
18850 -mimplicit-it=[never | arm] modes. */
18851 #undef ARM_VARIANT
18852 #define ARM_VARIANT & arm_ext_v1
18853
18854 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18855 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18856 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18857 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18858 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18859 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18860 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18861 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18862 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18863 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18864 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18865 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18866 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18867 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18868 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18869 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18870 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18871 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18872
18873 /* Thumb2 only instructions. */
18874 #undef ARM_VARIANT
18875 #define ARM_VARIANT NULL
18876
18877 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18878 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18879 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18880 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18881 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18882 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18883
18884 /* Hardware division instructions. */
18885 #undef ARM_VARIANT
18886 #define ARM_VARIANT & arm_ext_adiv
18887 #undef THUMB_VARIANT
18888 #define THUMB_VARIANT & arm_ext_div
18889
18890 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18891 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18892
18893 /* ARM V6M/V7 instructions. */
18894 #undef ARM_VARIANT
18895 #define ARM_VARIANT & arm_ext_barrier
18896 #undef THUMB_VARIANT
18897 #define THUMB_VARIANT & arm_ext_barrier
18898
18899 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18900 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18901 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18902
18903 /* ARM V7 instructions. */
18904 #undef ARM_VARIANT
18905 #define ARM_VARIANT & arm_ext_v7
18906 #undef THUMB_VARIANT
18907 #define THUMB_VARIANT & arm_ext_v7
18908
18909 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18910 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18911
18912 #undef ARM_VARIANT
18913 #define ARM_VARIANT & arm_ext_mp
18914 #undef THUMB_VARIANT
18915 #define THUMB_VARIANT & arm_ext_mp
18916
18917 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18918
 18919   /* ARMv8 instructions.  */
18920 #undef ARM_VARIANT
18921 #define ARM_VARIANT & arm_ext_v8
18922 #undef THUMB_VARIANT
18923 #define THUMB_VARIANT & arm_ext_v8
18924
18925 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18926 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18927 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18928 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18929 ldrexd, t_ldrexd),
18930 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18931 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18932 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18933 stlex, t_stlex),
18934 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18935 strexd, t_strexd),
18936 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18937 stlex, t_stlex),
18938 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18939 stlex, t_stlex),
18940 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18941 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18942 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18943 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18944 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18945 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18946
18947 /* ARMv8 T32 only. */
18948 #undef ARM_VARIANT
18949 #define ARM_VARIANT NULL
18950 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18951 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18952 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18953
18954 /* FP for ARMv8. */
18955 #undef ARM_VARIANT
18956 #define ARM_VARIANT & fpu_vfp_ext_armv8
18957 #undef THUMB_VARIANT
18958 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18959
18960 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18961 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18962 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18963 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18964 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18965 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18966 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18967 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18968 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18969 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18970 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18971 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18972 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18973 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18974 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18975 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18976 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18977
18978 /* Crypto v1 extensions. */
18979 #undef ARM_VARIANT
18980 #define ARM_VARIANT & fpu_crypto_ext_armv8
18981 #undef THUMB_VARIANT
18982 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18983
18984 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18985 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18986 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18987 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18988 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18989 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18990 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18991 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18992 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18993 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18994 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18995 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18996 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18997 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18998
18999 #undef ARM_VARIANT
19000 #define ARM_VARIANT & crc_ext_armv8
19001 #undef THUMB_VARIANT
19002 #define THUMB_VARIANT & crc_ext_armv8
19003 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19004 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19005 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19006 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19007 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19008 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19009
19010 #undef ARM_VARIANT
19011 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19012 #undef THUMB_VARIANT
19013 #define THUMB_VARIANT NULL
19014
19015 cCE("wfs", e200110, 1, (RR), rd),
19016 cCE("rfs", e300110, 1, (RR), rd),
19017 cCE("wfc", e400110, 1, (RR), rd),
19018 cCE("rfc", e500110, 1, (RR), rd),
19019
19020 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19021 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19022 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19023 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19024
19025 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19026 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19027 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19028 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19029
19030 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19031 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19032 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19033 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19034 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19035 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19036 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19037 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19038 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19039 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19040 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19041 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19042
19043 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19044 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19045 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19046 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19047 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19048 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19049 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19050 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19051 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19052 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19053 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19054 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19055
19056 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19057 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19058 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19059 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19060 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19061 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19062 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19063 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19064 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19065 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19066 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19067 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19068
19069 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19070 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19071 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19072 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19073 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19074 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19075 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19076 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19077 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19078 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19079 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19080 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19081
19082 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19083 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19084 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19085 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19086 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19087 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19088 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19089 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19090 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19091 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19092 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19093 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19094
19095 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19096 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19097 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19098 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19099 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19100 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19101 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19102 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19103 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19104 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19105 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19106 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19107
19108 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19109 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19110 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19111 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19112 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19113 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19114 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19115 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19116 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19117 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19118 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19119 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19120
19121 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19122 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19123 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19124 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19125 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19126 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19127 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19128 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19129 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19130 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19131 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19132 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19133
19134 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19135 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19136 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19137 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19138 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19139 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19140 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19141 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19142 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19143 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19144 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19145 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19146
19147 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19148 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19149 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19150 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19151 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19152 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19153 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19154 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19155 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19156 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19157 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19158 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19159
19160 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19161 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19162 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19163 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19164 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19165 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19166 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19167 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19168 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19169 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19170 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19171 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19172
19173 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19174 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19175 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19176 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19177 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19178 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19179 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19180 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19181 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19182 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19183 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19184 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19185
19186 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19187 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19188 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19189 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19190 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19191 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19192 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19193 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19194 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19195 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19196 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19197 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19198
19199 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19200 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19201 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19202 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19203 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19204 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19205 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19206 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19207 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19208 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19209 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19210 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19211
19212 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19213 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19214 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19215 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19216 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19217 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19218 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19219 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19220 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19221 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19222 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19223 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19224
19225 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19226 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19227 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19228 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19229 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19230 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19231 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19232 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19233 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19234 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19235 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19236 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19237
19238 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19239 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19240 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19241 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19242 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19243 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19244 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19245 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19246 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19247 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19248 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19249 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19250
19251 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19252 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19253 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19254 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19255 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19256 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19257 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19258 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19259 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19260 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19261 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19262 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19263
19264 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19265 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19266 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19267 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19268 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19269 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19270 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19271 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19272 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19273 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19274 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19275 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19276
19277 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19278 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19279 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19280 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19281 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19282 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19283 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19284 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19285 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19286 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19287 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19288 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19289
19290 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19291 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19292 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19293 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19294 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19295 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19296 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19297 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19298 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19299 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19300 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19301 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19302
19303 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19310 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19311 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19315
19316 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19323 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19324 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19328
19329 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19336 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19337 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19341
19342 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19349 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19350 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19354
19355 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19362 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19363 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19364 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19365 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19366 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19367
19368 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19369 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19370 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19371 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19372 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19373 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19374 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19375 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19376 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19377 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19378 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19379 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19380
19381 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19382 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19383 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19384 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19385 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19386 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19387 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19388 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19389 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19390 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19391 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19392 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19393
19394 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19395 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19396 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19397 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19398 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19399 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19400 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19401 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19402 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19403 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19404 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19405 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19406
19407 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19408 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19409 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19410 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19411
19412 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19413 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19414 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19415 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19416 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19417 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19418 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19419 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19420 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19421 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19422 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19423 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19424
19425 /* The implementation of the FIX instruction is broken on some
19426 assemblers, in that it accepts a precision specifier as well as a
19427 rounding specifier, despite the fact that this is meaningless.
19428 To be more compatible, we accept it as well, though of course it
19429 does not set any bits. */
19430 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19431 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19432 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19433 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19434 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19435 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19436 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19437 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19438 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19439 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19440 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19441 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19442 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19443
19444 /* Instructions that were new with the real FPA, call them V2. */
19445 #undef ARM_VARIANT
19446 #define ARM_VARIANT & fpu_fpa_ext_v2
19447
19448 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19449 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19450 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19451 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19452 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19453 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19454
19455 #undef ARM_VARIANT
19456 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19457
19458 /* Moves and type conversions. */
19459 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19460 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19461 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19462 cCE("fmstat", ef1fa10, 0, (), noargs),
19463 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19464 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19465 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19466 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19467 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19468 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19469 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19470 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19471 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19472 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19473
19474 /* Memory operations. */
19475 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19476 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19477 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19478 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19479 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19480 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19481 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19482 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19483 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19484 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19485 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19486 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19487 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19488 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19489 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19490 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19491 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19492 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19493
19494 /* Monadic operations. */
19495 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19496 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19497 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19498
19499 /* Dyadic operations. */
19500 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19501 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19502 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19503 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19504 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19505 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19506 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19507 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19508 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19509
19510 /* Comparisons. */
19511 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19512 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19513 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19514 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19515
19516 /* Double precision load/store are still present on single precision
19517 implementations. */
19518 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19519 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19520 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19521 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19522 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19523 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19524 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19525 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19526 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19527 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19528
19529 #undef ARM_VARIANT
19530 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19531
19532 /* Moves and type conversions. */
19533 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19534 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19535 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19536 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19537 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19538 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19539 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19540 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19541 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19542 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19543 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19544 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19545 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19546
19547 /* Monadic operations. */
19548 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19549 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19550 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19551
19552 /* Dyadic operations. */
19553 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19554 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19555 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19556 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19557 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19558 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19559 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19560 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19561 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19562
19563 /* Comparisons. */
19564 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19565 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19566 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19567 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19568
19569 #undef ARM_VARIANT
19570 #define ARM_VARIANT & fpu_vfp_ext_v2
19571
19572 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19573 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19574 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19575 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19576
19577 /* Instructions which may belong to either the Neon or VFP instruction sets.
19578 Individual encoder functions perform additional architecture checks. */
19579 #undef ARM_VARIANT
19580 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19581 #undef THUMB_VARIANT
19582 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19583
19584 /* These mnemonics are unique to VFP. */
19585 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19586 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19587 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19588 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19589 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19590 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19591 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19592 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19593 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19594 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19595
19596 /* Mnemonics shared by Neon and VFP. */
19597 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19598 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19599 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19600
19601 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19602 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19603
19604 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19605 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19606
19607 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19608 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19609 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19610 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19611 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19612 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19613 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19614 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19615
19616 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19617 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19618 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19619 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19620
19621
19622 /* NOTE: All VMOV encoding is special-cased! */
19623 NCE(vmov, 0, 1, (VMOV), neon_mov),
19624 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19625
19626 #undef THUMB_VARIANT
19627 #define THUMB_VARIANT & fpu_neon_ext_v1
19628 #undef ARM_VARIANT
19629 #define ARM_VARIANT & fpu_neon_ext_v1
19630
19631 /* Data processing with three registers of the same length. */
19632 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19633 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19634 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19635 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19636 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19637 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19638 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19639 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19640 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19641 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19642 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19643 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19644 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19645 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19646 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19647 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19648 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19649 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19650 /* If not immediate, fall back to neon_dyadic_i64_su.
19651 shl_imm should accept I8 I16 I32 I64,
19652 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19653 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19654 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19655 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19656 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19657 /* Logic ops, types optional & ignored. */
19658 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19659 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19660 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19661 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19662 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19663 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19664 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19665 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19666 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19667 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19668 /* Bitfield ops, untyped. */
19669 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19670 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19671 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19672 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19673 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19674 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19675 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19676 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19677 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19678 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19679 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19680 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19681 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19682 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19683 back to neon_dyadic_if_su. */
19684 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19685 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19686 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19687 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19688 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19689 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19690 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19691 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19692 /* Comparison. Type I8 I16 I32 F32. */
19693 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19694 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19695 /* As above, D registers only. */
19696 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19697 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19698 /* Int and float variants, signedness unimportant. */
19699 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19700 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19701 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19702 /* Add/sub take types I8 I16 I32 I64 F32. */
19703 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19704 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19705 /* vtst takes sizes 8, 16, 32. */
19706 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19707 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19708 /* VMUL takes I8 I16 I32 F32 P8. */
19709 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19710 /* VQD{R}MULH takes S16 S32. */
19711 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19712 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19713 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19714 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19715 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19716 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19717 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19718 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19719 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19720 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19721 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19722 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19723 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19724 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19725 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19726 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19727
19728 /* Two address, int/float. Types S8 S16 S32 F32. */
19729 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19730 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19731
19732 /* Data processing with two registers and a shift amount. */
19733 /* Right shifts, and variants with rounding.
19734 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19735 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19736 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19737 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19738 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19739 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19740 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19741 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19742 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19743 /* Shift and insert. Sizes accepted 8 16 32 64. */
19744 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19745 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19746 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19747 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19748 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19749 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19750 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19751 /* Right shift immediate, saturating & narrowing, with rounding variants.
19752 Types accepted S16 S32 S64 U16 U32 U64. */
19753 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19754 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19755 /* As above, unsigned. Types accepted S16 S32 S64. */
19756 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19757 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19758 /* Right shift narrowing. Types accepted I16 I32 I64. */
19759 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19760 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19761 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19762 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19763 /* CVT with optional immediate for fixed-point variant. */
19764 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19765
19766 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19767 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19768
19769 /* Data processing, three registers of different lengths. */
19770 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19771 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19772 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19773 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19774 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19775 /* If not scalar, fall back to neon_dyadic_long.
19776 Vector types as above, scalar types S16 S32 U16 U32. */
19777 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19778 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19779 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19780 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19781 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19782 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19783 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19784 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19785 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19786 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19787 /* Saturating doubling multiplies. Types S16 S32. */
19788 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19789 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19790 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19791 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19792 S16 S32 U16 U32. */
19793 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19794
19795 /* Extract. Size 8. */
19796 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19797 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19798
19799 /* Two registers, miscellaneous. */
19800 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19801 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19802 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19803 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19804 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19805 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19806 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19807 /* Vector replicate. Sizes 8 16 32. */
19808 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19809 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19810 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19811 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19812 /* VMOVN. Types I16 I32 I64. */
19813 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19814 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19815 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19816 /* VQMOVUN. Types S16 S32 S64. */
19817 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19818 /* VZIP / VUZP. Sizes 8 16 32. */
19819 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19820 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19821 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19822 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19823 /* VQABS / VQNEG. Types S8 S16 S32. */
19824 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19825 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19826 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19827 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19828 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19829 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19830 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19831 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19832 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19833 /* Reciprocal estimates. Types U32 F32. */
19834 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19835 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19836 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19837 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19838 /* VCLS. Types S8 S16 S32. */
19839 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19840 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19841 /* VCLZ. Types I8 I16 I32. */
19842 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19843 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19844 /* VCNT. Size 8. */
19845 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19846 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19847 /* Two address, untyped. */
19848 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19849 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19850 /* VTRN. Sizes 8 16 32. */
19851 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19852 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19853
19854 /* Table lookup. Size 8. */
19855 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19856 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19857
19858 #undef THUMB_VARIANT
19859 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19860 #undef ARM_VARIANT
19861 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19862
19863 /* Neon element/structure load/store. */
19864 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19865 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19866 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19867 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19868 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19869 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19870 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19871 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19872
19873 #undef THUMB_VARIANT
19874 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19875 #undef ARM_VARIANT
19876 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19877 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19878 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19879 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19880 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19881 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19882 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19883 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19884 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19885 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19886
19887 #undef THUMB_VARIANT
19888 #define THUMB_VARIANT & fpu_vfp_ext_v3
19889 #undef ARM_VARIANT
19890 #define ARM_VARIANT & fpu_vfp_ext_v3
19891
19892 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19893 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19894 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19895 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19896 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19897 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19898 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19899 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19900 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19901
19902 #undef ARM_VARIANT
19903 #define ARM_VARIANT & fpu_vfp_ext_fma
19904 #undef THUMB_VARIANT
19905 #define THUMB_VARIANT & fpu_vfp_ext_fma
19906 /* Mnemonics shared by Neon and VFP. These are included in the
19907 VFP FMA variant; NEON and VFP FMA always includes the NEON
19908 FMA instructions. */
19909 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19910 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19911 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19912 the v form should always be used. */
19913 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19914 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19915 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19916 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19917 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19918 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19919
19920 #undef THUMB_VARIANT
19921 #undef ARM_VARIANT
19922 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19923
19924 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19925 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19926 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19927 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19928 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19929 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19930 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19931 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19932
19933 #undef ARM_VARIANT
19934 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19935
19936 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19937 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19938 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19939 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19940 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19941 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19942 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19943 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19944 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19945 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19946 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19947 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19948 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19949 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19950 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19951 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19952 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19953 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19954 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19955 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19956 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19957 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19958 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19959 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19960 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19961 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19962 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
19963 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
19964 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
19965 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19966 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19967 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19968 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19969 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19970 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19971 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19972 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19973 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19974 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19975 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19976 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19977 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19978 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19979 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19980 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19981 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19982 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19983 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19984 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19985 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19986 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19987 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19988 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19989 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19990 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19991 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19992 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19993 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19994 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19995 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19996 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19997 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19998 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19999 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20000 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20001 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20002 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20003 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20004 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20005 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20006 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20007 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20008 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20009 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20010 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20011 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20012 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20013 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20014 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20015 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20016 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20017 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20018 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20019 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20020 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20021 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20022 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20023 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20024 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20025 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20026 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20027 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20028 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20029 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20030 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20031 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20032 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20033 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20034 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20035 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20036 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20037 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20038 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20039 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20040 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20041 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20042 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20043 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20044 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20045 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20046 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20047 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20048 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20049 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20050 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20051 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20052 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20053 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20054 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20055 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20056 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20057 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20058 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20059 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20060 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20061 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20062 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20063 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20064 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20065 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20066 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20067 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20068 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20069 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20070 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20071 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20079 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20080 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20081 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20082 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20083 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20084 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20085 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20088 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20089 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20090 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20091 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20092 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20093 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20097 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20098
20099 #undef ARM_VARIANT
20100 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20101
20102 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20103 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20104 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20105 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20106 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20107 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20108 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20109 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20110 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20111 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20112 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20113 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20114 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20115 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20116 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20117 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20118 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20119 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20120 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20121 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20122 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20123 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20124 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20125 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20126 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20127 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20128 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20129 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20130 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20131 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20132 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20133 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20134 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20135 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20136 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20137 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20138 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20139 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20140 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20141 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20142 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20143 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20144 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20145 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20146 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20147 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20148 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20149 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20150 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20151 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20152 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20153 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20154 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20155 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20156 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20157 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20158 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20159
20160 #undef ARM_VARIANT
20161 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20162
20163 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20164 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20165 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20166 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20167 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20168 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20169 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20170 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20171 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20172 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20173 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20174 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20175 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20176 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20177 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20178 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20179 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20180 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20181 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20182 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20183 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20184 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20185 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20186 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20187 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20188 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20189 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20190 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20191 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20192 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20193 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20194 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20195 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20196 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20197 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20198 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20199 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20200 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20201 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20202 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20203 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20204 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20205 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20206 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20207 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20208 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20209 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20210 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20211 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20212 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20213 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20214 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20215 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20216 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20217 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20218 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20219 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20220 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20221 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20222 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20223 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20224 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20225 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20226 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20227 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20228 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20229 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20230 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20231 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20232 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20233 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20234 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20235 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20236 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20237 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20238 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20239 };
20240 #undef ARM_VARIANT
20241 #undef THUMB_VARIANT
20242 #undef TCE
20243 #undef TUE
20244 #undef TUF
20245 #undef TCC
20246 #undef cCE
20247 #undef cCL
20248 #undef C3E
20249 #undef CE
20250 #undef CM
20251 #undef UE
20252 #undef UF
20253 #undef UT
20254 #undef NUF
20255 #undef nUF
20256 #undef NCE
20257 #undef nCE
20258 #undef OPS0
20259 #undef OPS1
20260 #undef OPS2
20261 #undef OPS3
20262 #undef OPS4
20263 #undef OPS5
20264 #undef OPS6
20265 #undef do_0
20266 \f
20267 /* MD interface: bits in the object file. */
20268
20269 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20270 for use in the a.out file, and stores them in the array pointed to by buf.
20271 This knows about the endian-ness of the target machine and does
20272 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20273 2 (short) and 4 (long) Floating numbers are put out as a series of
20274 LITTLENUMS (shorts, here at least). */
20275
20276 void
20277 md_number_to_chars (char * buf, valueT val, int n)
20278 {
20279 if (target_big_endian)
20280 number_to_chars_bigendian (buf, val, n);
20281 else
20282 number_to_chars_littleendian (buf, val, n);
20283 }
20284
20285 static valueT
20286 md_chars_to_number (char * buf, int n)
20287 {
20288 valueT result = 0;
20289 unsigned char * where = (unsigned char *) buf;
20290
20291 if (target_big_endian)
20292 {
20293 while (n--)
20294 {
20295 result <<= 8;
20296 result |= (*where++ & 255);
20297 }
20298 }
20299 else
20300 {
20301 while (n--)
20302 {
20303 result <<= 8;
20304 result |= (where[n] & 255);
20305 }
20306 }
20307
20308 return result;
20309 }
20310
20311 /* MD interface: Sections. */
20312
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  /* Only relaxable Thumb instruction frags should ever reach here.  */
  gas_assert (fragp->fr_type == rs_machine_dependent);
  return INSN_SIZE;
}
20332
20333 /* Estimate the size of a frag before relaxing. Assume everything fits in
20334 2 bytes. */
20335
20336 int
20337 md_estimate_size_before_relax (fragS * fragp,
20338 segT segtype ATTRIBUTE_UNUSED)
20339 {
20340 fragp->fr_var = 2;
20341 return 2;
20342 }
20343
/* Convert a machine dependent frag.  Called once relaxation has settled:
   if the frag was widened to 4 bytes (fr_var == 4), rewrite the narrow
   Thumb opcode at the frag's variable part as the corresponding 32-bit
   Thumb-2 encoding, then emit the fixup that will later fill in the
   immediate value or branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction to convert sits at the start of the frag's
     variable part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Original 16-bit opcode; its register fields are transplanted
     into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the fixup will resolve against.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* value recorded at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    /* Loads and stores with an immediate offset.  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcodes 0x4xxx (PC-relative load) and 0x9xxx
	     (SP-relative load/store) keep Rd in bits 8-10; the other
	     forms keep Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* Addressing-mode bits for the wide immediate-offset form
	     (NOTE(review): presumably P=1/U=1 in encoding T4 — confirm
	     against the Thumb-2 LDR/STR immediate encodings).  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load form is pc-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Move Rd (bits 4-7 here) into the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset applied elsewhere for
	     the narrow form.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    /* Data-processing with an 8-bit immediate.  */
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the destination register; cmp/cmn carry the
	     first source register, which sits 8 bits higher in T32.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  /* Select the modified-immediate form of the T32 encoding.  */
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    /* Unconditional branch.  */
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    /* Conditional branch: the condition field moves from bits 8-11
       of the narrow form into the wide encoding.  */
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    /* add rd, sp/pc and sp adjustments.  */
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    /* Three-register-form add/sub with immediate.  */
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variant, which takes
	     the ADD_IMM reloc.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup covering the (possibly widened) instruction and
     account for its final size in the fixed part of the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
20512
20513 /* Return the size of a relaxable immediate operand instruction.
20514 SHIFT and SIZE specify the form of the allowable immediate. */
20515 static int
20516 relax_immediate (fragS *fragp, int size, int shift)
20517 {
20518 offsetT offset;
20519 offsetT mask;
20520 offsetT low;
20521
20522 /* ??? Should be able to do better than this. */
20523 if (fragp->fr_symbol)
20524 return 4;
20525
20526 low = (1 << shift) - 1;
20527 mask = (1 << (shift + size)) - (1 << shift);
20528 offset = fragp->fr_offset;
20529 /* Force misaligned offsets to 32-bit variant. */
20530 if (offset & low)
20531 return 4;
20532 if (offset & ~mask)
20533 return 4;
20534 return 2;
20535 }
20536
/* Get the address of a symbol during relaxation.  STRETCH is the
   cumulative amount by which frags earlier in the section have grown
   (positive) or shrunk (negative) on the current relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; round it down to the alignment boundary,
		 preserving the sign for negative stretch.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed: the symbol will not move at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (possibly reduced) stretch if the symbol's frag
	 really lies after us; f == NULL means it was before us.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20586
20587 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20588 load. */
20589 static int
20590 relax_adr (fragS *fragp, asection *sec, long stretch)
20591 {
20592 addressT addr;
20593 offsetT val;
20594
20595 /* Assume worst case for symbols not known to be in the same section. */
20596 if (fragp->fr_symbol == NULL
20597 || !S_IS_DEFINED (fragp->fr_symbol)
20598 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20599 || S_IS_WEAK (fragp->fr_symbol))
20600 return 4;
20601
20602 val = relaxed_symbol_addr (fragp, stretch);
20603 addr = fragp->fr_address + fragp->fr_fix;
20604 addr = (addr + 4) & ~3;
20605 /* Force misaligned targets to 32-bit variant. */
20606 if (val & 3)
20607 return 4;
20608 val -= addr;
20609 if (val < 0 || val > 1020)
20610 return 4;
20611 return 2;
20612 }
20613
20614 /* Return the size of a relaxable add/sub immediate instruction. */
20615 static int
20616 relax_addsub (fragS *fragp, asection *sec)
20617 {
20618 char *buf;
20619 int op;
20620
20621 buf = fragp->fr_literal + fragp->fr_fix;
20622 op = bfd_get_16(sec->owner, buf);
20623 if ((op & 0xf) == ((op >> 4) & 0xf))
20624 return relax_immediate (fragp, 8, 0);
20625 else
20626 return relax_immediate (fragp, 3, 0);
20627 }
20628
20629 /* Return TRUE iff the definition of symbol S could be pre-empted
20630 (overridden) at link or load time. */
20631 static bfd_boolean
20632 symbol_preemptible (symbolS *s)
20633 {
20634 /* Weak symbols can always be pre-empted. */
20635 if (S_IS_WEAK (s))
20636 return TRUE;
20637
20638 /* Non-global symbols cannot be pre-empted. */
20639 if (! S_IS_EXTERNAL (s))
20640 return FALSE;
20641
20642 #ifdef OBJ_ELF
20643 /* In ELF, a global symbol can be marked protected, or private. In that
20644 case it can't be pre-empted (other definitions in the same link unit
20645 would violate the ODR). */
20646 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20647 return FALSE;
20648 #endif
20649
20650 /* Other global symbols might be pre-empted. */
20651 return TRUE;
20652 }
20653
20654 /* Return the size of a relaxable branch instruction. BITS is the
20655 size of the offset field in the narrow instruction. */
20656
20657 static int
20658 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20659 {
20660 addressT addr;
20661 offsetT val;
20662 offsetT limit;
20663
20664 /* Assume worst case for symbols not known to be in the same section. */
20665 if (!S_IS_DEFINED (fragp->fr_symbol)
20666 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20667 || S_IS_WEAK (fragp->fr_symbol))
20668 return 4;
20669
20670 #ifdef OBJ_ELF
20671 /* A branch to a function in ARM state will require interworking. */
20672 if (S_IS_DEFINED (fragp->fr_symbol)
20673 && ARM_IS_FUNC (fragp->fr_symbol))
20674 return 4;
20675 #endif
20676
20677 if (symbol_preemptible (fragp->fr_symbol))
20678 return 4;
20679
20680 val = relaxed_symbol_addr (fragp, stretch);
20681 addr = fragp->fr_address + fragp->fr_fix + 4;
20682 val -= addr;
20683
20684 /* Offset is a signed value *2 */
20685 limit = 1 << bits;
20686 if (val >= limit || val < -limit)
20687 return 4;
20688 return 2;
20689 }
20690
20691
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded at assembly time; the second and
     third arguments of relax_immediate are the immediate field width in
     bits and the scale shift of the narrow encoding.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
20770
/* Round up a section size to the appropriate boundary.  For most
   targets the size is returned unchanged; only a.out flavours need
   explicit padding.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      /* Round SIZE up to a multiple of the section's alignment.  */
      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
20794
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment with no-op instructions appropriate to
   the recorded ARM/Thumb mode: zero bytes restore alignment, then nops
   pad up to the next frag.  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM nops, indexed by [has-v6k-nop][big-endian].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* 16-bit Thumb nops, indexed by [is-thumb2][big-endian].  */
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the mode by now.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code: on Thumb-2 capable CPUs pad mostly with wide nops,
	 keeping a narrow nop available for odd half-word counts.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: pick the v6k hint nop when available.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Leading bytes that misalign the nop stream are zero-filled (and,
     for ELF, marked as data via a mapping symbol).  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the frag with full-size nops.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
20911
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  N is the alignment power, MAX the maximum
   number of bytes the alignment may insert.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    {
      char err_msg[128];

      /* NOTE(review): assumes the translated message plus the formatted
	 integer always fits in 128 bytes — verify translations stay
	 short enough, or use a bounded formatting routine.  */
      sprintf (err_msg,
	       _("alignments greater than %d bytes not supported in .text sections."),
	       MAX_MEM_FOR_RS_ALIGN_CODE + 1);
      /* Pass via "%s" so the translated text is never interpreted as a
	 format string by as_fatal.  */
      as_fatal ("%s", err_msg);
    }

  /* Reserve the worst-case amount of room; arm_handle_align fills it.  */
  p = frag_var (rs_align_code,
		MAX_MEM_FOR_RS_ALIGN_CODE,
		1,
		(relax_substateT) max,
		(symbolS *) NULL,
		(offsetT) n,
		(char *) NULL);
  *p = 0;
}
20941
20942 /* Perform target specific initialisation of a frag.
20943 Note - despite the name this initialisation is not done when the frag
20944 is created, but only when its type is assigned. A frag can be created
20945 and used a long time before its type is set, so beware of assuming that
20946 this initialisationis performed first. */
20947
20948 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  Non-ELF
     targets need no mapping symbols, so this is all there is to do.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
20955
20956 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    {
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
	 later if the alignment ends up empty.  */
      switch (fragP->fr_type)
	{
	case rs_align:
	case rs_align_test:
	case rs_fill:
	  /* Alignment padding and fills are data, not code.  */
	  mapping_state_2 (MAP_DATA, max_chars);
	  break;
	case rs_align_code:
	  /* Code alignment is padded with nops in the current mode.  */
	  mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
	  break;
	default:
	  break;
	}
    }
}
20983
20984 /* When we change sections we need to issue a new mapping symbol. */
20985
20986 void
20987 arm_elf_change_section (void)
20988 {
20989 /* Link an unlinked unwind index table section to the .text section. */
20990 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20991 && elf_linked_to_section (now_seg) == NULL)
20992 elf_linked_to_section (now_seg) = text_section;
20993 }
20994
20995 int
20996 arm_elf_section_type (const char * str, size_t len)
20997 {
20998 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20999 return SHT_ARM_EXIDX;
21000
21001 return -1;
21002 }
21003 \f
21004 /* Code to deal with unwinding tables. */
21005
21006 static void add_unwind_adjustsp (offsetT);
21007
21008 /* Generate any deferred unwind frame offset. */
21009
21010 static void
21011 flush_pending_unwind (void)
21012 {
21013 offsetT offset;
21014
21015 offset = unwind.pending_offset;
21016 unwind.pending_offset = 0;
21017 if (offset != 0)
21018 add_unwind_adjustsp (offset);
21019 }
21020
21021 /* Add an opcode to this list for this function. Two-byte opcodes should
21022 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21023 order. */
21024
21025 static void
21026 add_unwind_opcode (valueT op, int length)
21027 {
21028 /* Add any deferred stack adjustment. */
21029 if (unwind.pending_offset)
21030 flush_pending_unwind ();
21031
21032 unwind.sp_restored = 0;
21033
21034 if (unwind.opcode_count + length > unwind.opcode_alloc)
21035 {
21036 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21037 if (unwind.opcodes)
21038 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21039 unwind.opcode_alloc);
21040 else
21041 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21042 }
21043 while (length > 0)
21044 {
21045 length--;
21046 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21047 op >>= 8;
21048 unwind.opcode_count++;
21049 }
21050 }
21051
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Chooses among the short (0x00-0x3f / 0x40-0x7f), two-opcode and
   uleb128 long forms depending on the magnitude and sign.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one byte emitted.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Set the continuation bit on all but the last byte.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit as many maximal decrements as needed,
	 then one final 0x40-based short opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21113
21114 /* Finish the list of unwind opcodes for this function. */
21115 static void
21116 finish_unwind_opcodes (void)
21117 {
21118 valueT op;
21119
21120 if (unwind.fp_used)
21121 {
21122 /* Adjust sp as necessary. */
21123 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21124 flush_pending_unwind ();
21125
21126 /* After restoring sp from the frame pointer. */
21127 op = 0x90 | unwind.fp_reg;
21128 add_unwind_opcode (op, 1);
21129 }
21130 else
21131 flush_pending_unwind ();
21132 }
21133
21134
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry (.ARM.exidx*), otherwise an unwind info entry (.ARM.extab*).
   Switches the current segment to the unwind section paired with
   TEXT_SEG, creating it (and its COMDAT group membership) if needed.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  /* Pick the section-name prefix and section type for this entry kind.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's name,
     with ".text" itself mapping to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21210
21211
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  Emits the packed unwind opcodes (collected in
   unwind.opcodes, stored in reverse order) into the unwind section.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* Personality index -2 marks a .cantunwind frame.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack up to three opcodes MSB-first, reversing the list.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* SIZE becomes the number of extra 32-bit words needed.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  /* Lay out the first word, which depends on the personality kind.  */
  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21380
21381
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21389 #endif /* OBJ_ELF */
21390
21391 /* Convert REGNAME to a DWARF-2 register number. */
21392
21393 int
21394 tc_arm_regname_to_dw2regnum (char *regname)
21395 {
21396 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21397 if (reg != FAIL)
21398 return reg;
21399
21400 /* PR 16694: Allow VFP registers as well. */
21401 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21402 if (reg != FAIL)
21403 return 64 + reg;
21404
21405 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21406 if (reg != FAIL)
21407 return reg + 256;
21408
21409 return -1;
21410 }
21411
21412 #ifdef TE_PE
21413 void
21414 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21415 {
21416 expressionS exp;
21417
21418 exp.X_op = O_secrel;
21419 exp.X_add_symbol = symbol;
21420 exp.X_add_number = 0;
21421 emit_expr (&exp, size);
21422 }
21423 #endif
21424
21425 /* MD interface: Symbol and relocation handling. */
21426
21427 /* Return the address within the segment that a PC-relative fixup is
21428 relative to. For ARM, PC-relative fixups applied to instructions
21429 are generally relative to the location of the fixup plus 8 bytes.
21430 Thumb branches are offset by 4, and Thumb loads relative to PC
21431 require special handling. */
21432
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  /* Apply the pipeline bias appropriate to the relocation type.  */
  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb PC-relative loads use Align(PC,4): offset by +4, then
	 clear the low two bits.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A local Thumb->ARM-function branch will be converted (to BLX)
	 and resolved by md_apply_fix, so restore the real base that
	 the "emit a relocation" case above may have zeroed.
	 NOTE(review): this must stay in sync with the matching
	 conversion logic in md_apply_fix — confirm when changing.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* Local calls to Thumb functions are flipped to BLX and resolved
	 in md_apply_fix, hence the restored base (see BRANCH23 above).  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21554
21555 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21556 Otherwise we have no need to default values of symbols. */
21557
21558 symbolS *
21559 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21560 {
21561 #ifdef OBJ_ELF
21562 if (name[0] == '_' && name[1] == 'G'
21563 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21564 {
21565 if (!GOT_symbol)
21566 {
21567 if (symbol_find (name))
21568 as_bad (_("GOT already in the symbol table"));
21569
21570 GOT_symbol = symbol_new (name, undefined_section,
21571 (valueT) 0, & zero_address_frag);
21572 }
21573
21574 return GOT_symbol;
21575 }
21576 #endif
21577
21578 return NULL;
21579 }
21580
21581 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21582 computed as two separate immediate values, added together. We
21583 already know that this value cannot be computed by just one ARM
21584 instruction. */
21585
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation: the ARM modified-immediate encoding can
     only express an 8-bit value rotated right by an even amount.
     In the encodings below, (n << 7) places n/2 in the 4-bit rotation
     field at bits 8-11 of the instruction.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The low byte of A gives the first immediate; the single
	   remaining non-zero byte (if any) gives the high part.
	   Its extra rotation depends on which byte it occupies.
	   NOTE(review): no masking is applied to (i + N), presumably
	   because the first matching rotation keeps it in range —
	   confirm before changing the loop.  */
	if (a & 0xff00)
	  {
	    /* More than two significant bytes - not expressible.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only bits 24-31 can be left at this point.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  /* Not expressible as two rotated 8-bit immediates.  */
  return FAIL;
}
21619
21620 static int
21621 validate_offset_imm (unsigned int val, int hwse)
21622 {
21623 if ((hwse && val > 255) || val > 4095)
21624 return FAIL;
21625 return val;
21626 }
21627
21628 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21629 negative immediate constant by altering the instruction. A bit of
21630 a hack really.
21631 MOV <-> MVN
21632 AND <-> BIC
21633 ADC <-> SBC
21634 by inverting the second operand, and
21635 ADD <-> SUB
21636 CMP <-> CMN
21637 by negating the second operand. */
21638
21639 static int
21640 negate_data_op (unsigned long * instruction,
21641 unsigned long value)
21642 {
21643 int op, new_inst;
21644 unsigned long negated, inverted;
21645
21646 negated = encode_arm_immediate (-value);
21647 inverted = encode_arm_immediate (~value);
21648
21649 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21650 switch (op)
21651 {
21652 /* First negates. */
21653 case OPCODE_SUB: /* ADD <-> SUB */
21654 new_inst = OPCODE_ADD;
21655 value = negated;
21656 break;
21657
21658 case OPCODE_ADD:
21659 new_inst = OPCODE_SUB;
21660 value = negated;
21661 break;
21662
21663 case OPCODE_CMP: /* CMP <-> CMN */
21664 new_inst = OPCODE_CMN;
21665 value = negated;
21666 break;
21667
21668 case OPCODE_CMN:
21669 new_inst = OPCODE_CMP;
21670 value = negated;
21671 break;
21672
21673 /* Now Inverted ops. */
21674 case OPCODE_MOV: /* MOV <-> MVN */
21675 new_inst = OPCODE_MVN;
21676 value = inverted;
21677 break;
21678
21679 case OPCODE_MVN:
21680 new_inst = OPCODE_MOV;
21681 value = inverted;
21682 break;
21683
21684 case OPCODE_AND: /* AND <-> BIC */
21685 new_inst = OPCODE_BIC;
21686 value = inverted;
21687 break;
21688
21689 case OPCODE_BIC:
21690 new_inst = OPCODE_AND;
21691 value = inverted;
21692 break;
21693
21694 case OPCODE_ADC: /* ADC <-> SBC */
21695 new_inst = OPCODE_SBC;
21696 value = inverted;
21697 break;
21698
21699 case OPCODE_SBC:
21700 new_inst = OPCODE_ADC;
21701 value = inverted;
21702 break;
21703
21704 /* We cannot do anything. */
21705 default:
21706 return FAIL;
21707 }
21708
21709 if (value == (unsigned) FAIL)
21710 return FAIL;
21711
21712 *instruction &= OPCODE_MASK;
21713 *instruction |= new_inst << DATA_OP_SHIFT;
21714 return value;
21715 }
21716
21717 /* Like negate_data_op, but for Thumb-2. */
21718
21719 static unsigned int
21720 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21721 {
21722 int op, new_inst;
21723 int rd;
21724 unsigned int negated, inverted;
21725
21726 negated = encode_thumb32_immediate (-value);
21727 inverted = encode_thumb32_immediate (~value);
21728
21729 rd = (*instruction >> 8) & 0xf;
21730 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21731 switch (op)
21732 {
21733 /* ADD <-> SUB. Includes CMP <-> CMN. */
21734 case T2_OPCODE_SUB:
21735 new_inst = T2_OPCODE_ADD;
21736 value = negated;
21737 break;
21738
21739 case T2_OPCODE_ADD:
21740 new_inst = T2_OPCODE_SUB;
21741 value = negated;
21742 break;
21743
21744 /* ORR <-> ORN. Includes MOV <-> MVN. */
21745 case T2_OPCODE_ORR:
21746 new_inst = T2_OPCODE_ORN;
21747 value = inverted;
21748 break;
21749
21750 case T2_OPCODE_ORN:
21751 new_inst = T2_OPCODE_ORR;
21752 value = inverted;
21753 break;
21754
21755 /* AND <-> BIC. TST has no inverted equivalent. */
21756 case T2_OPCODE_AND:
21757 new_inst = T2_OPCODE_BIC;
21758 if (rd == 15)
21759 value = FAIL;
21760 else
21761 value = inverted;
21762 break;
21763
21764 case T2_OPCODE_BIC:
21765 new_inst = T2_OPCODE_AND;
21766 value = inverted;
21767 break;
21768
21769 /* ADC <-> SBC */
21770 case T2_OPCODE_ADC:
21771 new_inst = T2_OPCODE_SBC;
21772 value = inverted;
21773 break;
21774
21775 case T2_OPCODE_SBC:
21776 new_inst = T2_OPCODE_ADC;
21777 value = inverted;
21778 break;
21779
21780 /* We cannot do anything. */
21781 default:
21782 return FAIL;
21783 }
21784
21785 if (value == (unsigned int)FAIL)
21786 return FAIL;
21787
21788 *instruction &= T2_OPCODE_MASK;
21789 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21790 return value;
21791 }
21792
21793 /* Read a 32-bit thumb instruction from buf. */
21794 static unsigned long
21795 get_thumb32_insn (char * buf)
21796 {
21797 unsigned long insn;
21798 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21799 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21800
21801 return insn;
21802 }
21803
21804
21805 /* We usually want to set the low bit on the address of thumb function
21806 symbols. In particular .word foo - . should have the low bit set.
21807 Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a Thumb function.  */
21810
21811 bfd_boolean
21812 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21813 {
21814 if (op == O_subtract
21815 && l->X_op == O_symbol
21816 && r->X_op == O_symbol
21817 && THUMB_IS_FUNC (l->X_add_symbol))
21818 {
21819 l->X_op = O_subtract;
21820 l->X_op_symbol = r->X_add_symbol;
21821 l->X_add_number -= r->X_add_number;
21822 return TRUE;
21823 }
21824
21825 /* Process as normal. */
21826 return FALSE;
21827 }
21828
/* Encode Thumb2 unconditional branches and calls.  The encoding
   of the immediate field is identical for the two.  */
21831
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask covering the J1 (bit 13) and J2 (bit 11) positions of the
     second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Decompose the (already halfword-aligned) offset into the fields
     of the T4/T1 encoding: sign bit S, the two intermediate bits, and
     the high/low immediate chunks.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The encoding stores J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S);
     the final XOR with T2I1I2MASK performs the NOT on both bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
21853
21854 void
21855 md_apply_fix (fixS * fixP,
21856 valueT * valP,
21857 segT seg)
21858 {
21859 offsetT value = * valP;
21860 offsetT newval;
21861 unsigned int newimm;
21862 unsigned long temp;
21863 int sign;
21864 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21865
21866 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21867
21868 /* Note whether this will delete the relocation. */
21869
21870 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21871 fixP->fx_done = 1;
21872
21873 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21874 consistency with the behaviour on 32-bit hosts. Remember value
21875 for emit_reloc. */
21876 value &= 0xffffffff;
21877 value ^= 0x80000000;
21878 value -= 0x80000000;
21879
21880 *valP = value;
21881 fixP->fx_addnumber = value;
21882
21883 /* Same treatment for fixP->fx_offset. */
21884 fixP->fx_offset &= 0xffffffff;
21885 fixP->fx_offset ^= 0x80000000;
21886 fixP->fx_offset -= 0x80000000;
21887
21888 switch (fixP->fx_r_type)
21889 {
21890 case BFD_RELOC_NONE:
21891 /* This will need to go in the object file. */
21892 fixP->fx_done = 0;
21893 break;
21894
21895 case BFD_RELOC_ARM_IMMEDIATE:
21896 /* We claim that this fixup has been processed here,
21897 even if in fact we generate an error because we do
21898 not have a reloc for it, so tc_gen_reloc will reject it. */
21899 fixP->fx_done = 1;
21900
21901 if (fixP->fx_addsy)
21902 {
21903 const char *msg = 0;
21904
21905 if (! S_IS_DEFINED (fixP->fx_addsy))
21906 msg = _("undefined symbol %s used as an immediate value");
21907 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21908 msg = _("symbol %s is in a different section");
21909 else if (S_IS_WEAK (fixP->fx_addsy))
21910 msg = _("symbol %s is weak and may be overridden later");
21911
21912 if (msg)
21913 {
21914 as_bad_where (fixP->fx_file, fixP->fx_line,
21915 msg, S_GET_NAME (fixP->fx_addsy));
21916 break;
21917 }
21918 }
21919
21920 temp = md_chars_to_number (buf, INSN_SIZE);
21921
21922 /* If the offset is negative, we should use encoding A2 for ADR. */
21923 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21924 newimm = negate_data_op (&temp, value);
21925 else
21926 {
21927 newimm = encode_arm_immediate (value);
21928
21929 /* If the instruction will fail, see if we can fix things up by
21930 changing the opcode. */
21931 if (newimm == (unsigned int) FAIL)
21932 newimm = negate_data_op (&temp, value);
21933 }
21934
21935 if (newimm == (unsigned int) FAIL)
21936 {
21937 as_bad_where (fixP->fx_file, fixP->fx_line,
21938 _("invalid constant (%lx) after fixup"),
21939 (unsigned long) value);
21940 break;
21941 }
21942
21943 newimm |= (temp & 0xfffff000);
21944 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21945 break;
21946
21947 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21948 {
21949 unsigned int highpart = 0;
21950 unsigned int newinsn = 0xe1a00000; /* nop. */
21951
21952 if (fixP->fx_addsy)
21953 {
21954 const char *msg = 0;
21955
21956 if (! S_IS_DEFINED (fixP->fx_addsy))
21957 msg = _("undefined symbol %s used as an immediate value");
21958 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21959 msg = _("symbol %s is in a different section");
21960 else if (S_IS_WEAK (fixP->fx_addsy))
21961 msg = _("symbol %s is weak and may be overridden later");
21962
21963 if (msg)
21964 {
21965 as_bad_where (fixP->fx_file, fixP->fx_line,
21966 msg, S_GET_NAME (fixP->fx_addsy));
21967 break;
21968 }
21969 }
21970
21971 newimm = encode_arm_immediate (value);
21972 temp = md_chars_to_number (buf, INSN_SIZE);
21973
21974 /* If the instruction will fail, see if we can fix things up by
21975 changing the opcode. */
21976 if (newimm == (unsigned int) FAIL
21977 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21978 {
21979 /* No ? OK - try using two ADD instructions to generate
21980 the value. */
21981 newimm = validate_immediate_twopart (value, & highpart);
21982
21983 /* Yes - then make sure that the second instruction is
21984 also an add. */
21985 if (newimm != (unsigned int) FAIL)
21986 newinsn = temp;
21987 /* Still No ? Try using a negated value. */
21988 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21989 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21990 /* Otherwise - give up. */
21991 else
21992 {
21993 as_bad_where (fixP->fx_file, fixP->fx_line,
21994 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21995 (long) value);
21996 break;
21997 }
21998
21999 /* Replace the first operand in the 2nd instruction (which
22000 is the PC) with the destination register. We have
22001 already added in the PC in the first instruction and we
22002 do not want to do it again. */
22003 newinsn &= ~ 0xf0000;
22004 newinsn |= ((newinsn & 0x0f000) << 4);
22005 }
22006
22007 newimm |= (temp & 0xfffff000);
22008 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22009
22010 highpart |= (newinsn & 0xfffff000);
22011 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22012 }
22013 break;
22014
22015 case BFD_RELOC_ARM_OFFSET_IMM:
22016 if (!fixP->fx_done && seg->use_rela_p)
22017 value = 0;
22018
22019 case BFD_RELOC_ARM_LITERAL:
22020 sign = value > 0;
22021
22022 if (value < 0)
22023 value = - value;
22024
22025 if (validate_offset_imm (value, 0) == FAIL)
22026 {
22027 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22028 as_bad_where (fixP->fx_file, fixP->fx_line,
22029 _("invalid literal constant: pool needs to be closer"));
22030 else
22031 as_bad_where (fixP->fx_file, fixP->fx_line,
22032 _("bad immediate value for offset (%ld)"),
22033 (long) value);
22034 break;
22035 }
22036
22037 newval = md_chars_to_number (buf, INSN_SIZE);
22038 if (value == 0)
22039 newval &= 0xfffff000;
22040 else
22041 {
22042 newval &= 0xff7ff000;
22043 newval |= value | (sign ? INDEX_UP : 0);
22044 }
22045 md_number_to_chars (buf, newval, INSN_SIZE);
22046 break;
22047
22048 case BFD_RELOC_ARM_OFFSET_IMM8:
22049 case BFD_RELOC_ARM_HWLITERAL:
22050 sign = value > 0;
22051
22052 if (value < 0)
22053 value = - value;
22054
22055 if (validate_offset_imm (value, 1) == FAIL)
22056 {
22057 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22058 as_bad_where (fixP->fx_file, fixP->fx_line,
22059 _("invalid literal constant: pool needs to be closer"));
22060 else
22061 as_bad_where (fixP->fx_file, fixP->fx_line,
22062 _("bad immediate value for 8-bit offset (%ld)"),
22063 (long) value);
22064 break;
22065 }
22066
22067 newval = md_chars_to_number (buf, INSN_SIZE);
22068 if (value == 0)
22069 newval &= 0xfffff0f0;
22070 else
22071 {
22072 newval &= 0xff7ff0f0;
22073 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22074 }
22075 md_number_to_chars (buf, newval, INSN_SIZE);
22076 break;
22077
22078 case BFD_RELOC_ARM_T32_OFFSET_U8:
22079 if (value < 0 || value > 1020 || value % 4 != 0)
22080 as_bad_where (fixP->fx_file, fixP->fx_line,
22081 _("bad immediate value for offset (%ld)"), (long) value);
22082 value /= 4;
22083
22084 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22085 newval |= value;
22086 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22087 break;
22088
22089 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22090 /* This is a complicated relocation used for all varieties of Thumb32
22091 load/store instruction with immediate offset:
22092
22093 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22094 *4, optional writeback(W)
22095 (doubleword load/store)
22096
22097 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22098 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22099 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22100 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22101 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22102
22103 Uppercase letters indicate bits that are already encoded at
22104 this point. Lowercase letters are our problem. For the
22105 second block of instructions, the secondary opcode nybble
22106 (bits 8..11) is present, and bit 23 is zero, even if this is
22107 a PC-relative operation. */
22108 newval = md_chars_to_number (buf, THUMB_SIZE);
22109 newval <<= 16;
22110 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22111
22112 if ((newval & 0xf0000000) == 0xe0000000)
22113 {
22114 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22115 if (value >= 0)
22116 newval |= (1 << 23);
22117 else
22118 value = -value;
22119 if (value % 4 != 0)
22120 {
22121 as_bad_where (fixP->fx_file, fixP->fx_line,
22122 _("offset not a multiple of 4"));
22123 break;
22124 }
22125 value /= 4;
22126 if (value > 0xff)
22127 {
22128 as_bad_where (fixP->fx_file, fixP->fx_line,
22129 _("offset out of range"));
22130 break;
22131 }
22132 newval &= ~0xff;
22133 }
22134 else if ((newval & 0x000f0000) == 0x000f0000)
22135 {
22136 /* PC-relative, 12-bit offset. */
22137 if (value >= 0)
22138 newval |= (1 << 23);
22139 else
22140 value = -value;
22141 if (value > 0xfff)
22142 {
22143 as_bad_where (fixP->fx_file, fixP->fx_line,
22144 _("offset out of range"));
22145 break;
22146 }
22147 newval &= ~0xfff;
22148 }
22149 else if ((newval & 0x00000100) == 0x00000100)
22150 {
22151 /* Writeback: 8-bit, +/- offset. */
22152 if (value >= 0)
22153 newval |= (1 << 9);
22154 else
22155 value = -value;
22156 if (value > 0xff)
22157 {
22158 as_bad_where (fixP->fx_file, fixP->fx_line,
22159 _("offset out of range"));
22160 break;
22161 }
22162 newval &= ~0xff;
22163 }
22164 else if ((newval & 0x00000f00) == 0x00000e00)
22165 {
22166 /* T-instruction: positive 8-bit offset. */
22167 if (value < 0 || value > 0xff)
22168 {
22169 as_bad_where (fixP->fx_file, fixP->fx_line,
22170 _("offset out of range"));
22171 break;
22172 }
22173 newval &= ~0xff;
22174 newval |= value;
22175 }
22176 else
22177 {
22178 /* Positive 12-bit or negative 8-bit offset. */
22179 int limit;
22180 if (value >= 0)
22181 {
22182 newval |= (1 << 23);
22183 limit = 0xfff;
22184 }
22185 else
22186 {
22187 value = -value;
22188 limit = 0xff;
22189 }
22190 if (value > limit)
22191 {
22192 as_bad_where (fixP->fx_file, fixP->fx_line,
22193 _("offset out of range"));
22194 break;
22195 }
22196 newval &= ~limit;
22197 }
22198
22199 newval |= value;
22200 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22201 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22202 break;
22203
22204 case BFD_RELOC_ARM_SHIFT_IMM:
22205 newval = md_chars_to_number (buf, INSN_SIZE);
22206 if (((unsigned long) value) > 32
22207 || (value == 32
22208 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22209 {
22210 as_bad_where (fixP->fx_file, fixP->fx_line,
22211 _("shift expression is too large"));
22212 break;
22213 }
22214
22215 if (value == 0)
22216 /* Shifts of zero must be done as lsl. */
22217 newval &= ~0x60;
22218 else if (value == 32)
22219 value = 0;
22220 newval &= 0xfffff07f;
22221 newval |= (value & 0x1f) << 7;
22222 md_number_to_chars (buf, newval, INSN_SIZE);
22223 break;
22224
22225 case BFD_RELOC_ARM_T32_IMMEDIATE:
22226 case BFD_RELOC_ARM_T32_ADD_IMM:
22227 case BFD_RELOC_ARM_T32_IMM12:
22228 case BFD_RELOC_ARM_T32_ADD_PC12:
22229 /* We claim that this fixup has been processed here,
22230 even if in fact we generate an error because we do
22231 not have a reloc for it, so tc_gen_reloc will reject it. */
22232 fixP->fx_done = 1;
22233
22234 if (fixP->fx_addsy
22235 && ! S_IS_DEFINED (fixP->fx_addsy))
22236 {
22237 as_bad_where (fixP->fx_file, fixP->fx_line,
22238 _("undefined symbol %s used as an immediate value"),
22239 S_GET_NAME (fixP->fx_addsy));
22240 break;
22241 }
22242
22243 newval = md_chars_to_number (buf, THUMB_SIZE);
22244 newval <<= 16;
22245 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22246
22247 newimm = FAIL;
22248 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22249 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22250 {
22251 newimm = encode_thumb32_immediate (value);
22252 if (newimm == (unsigned int) FAIL)
22253 newimm = thumb32_negate_data_op (&newval, value);
22254 }
22255 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22256 && newimm == (unsigned int) FAIL)
22257 {
	  /* Turn add/sub into addw/subw.  */
22259 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22260 newval = (newval & 0xfeffffff) | 0x02000000;
22261 /* No flat 12-bit imm encoding for addsw/subsw. */
22262 if ((newval & 0x00100000) == 0)
22263 {
22264 /* 12 bit immediate for addw/subw. */
22265 if (value < 0)
22266 {
22267 value = -value;
22268 newval ^= 0x00a00000;
22269 }
22270 if (value > 0xfff)
22271 newimm = (unsigned int) FAIL;
22272 else
22273 newimm = value;
22274 }
22275 }
22276
22277 if (newimm == (unsigned int)FAIL)
22278 {
22279 as_bad_where (fixP->fx_file, fixP->fx_line,
22280 _("invalid constant (%lx) after fixup"),
22281 (unsigned long) value);
22282 break;
22283 }
22284
22285 newval |= (newimm & 0x800) << 15;
22286 newval |= (newimm & 0x700) << 4;
22287 newval |= (newimm & 0x0ff);
22288
22289 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22290 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22291 break;
22292
22293 case BFD_RELOC_ARM_SMC:
22294 if (((unsigned long) value) > 0xffff)
22295 as_bad_where (fixP->fx_file, fixP->fx_line,
22296 _("invalid smc expression"));
22297 newval = md_chars_to_number (buf, INSN_SIZE);
22298 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22299 md_number_to_chars (buf, newval, INSN_SIZE);
22300 break;
22301
22302 case BFD_RELOC_ARM_HVC:
22303 if (((unsigned long) value) > 0xffff)
22304 as_bad_where (fixP->fx_file, fixP->fx_line,
22305 _("invalid hvc expression"));
22306 newval = md_chars_to_number (buf, INSN_SIZE);
22307 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22308 md_number_to_chars (buf, newval, INSN_SIZE);
22309 break;
22310
22311 case BFD_RELOC_ARM_SWI:
22312 if (fixP->tc_fix_data != 0)
22313 {
22314 if (((unsigned long) value) > 0xff)
22315 as_bad_where (fixP->fx_file, fixP->fx_line,
22316 _("invalid swi expression"));
22317 newval = md_chars_to_number (buf, THUMB_SIZE);
22318 newval |= value;
22319 md_number_to_chars (buf, newval, THUMB_SIZE);
22320 }
22321 else
22322 {
22323 if (((unsigned long) value) > 0x00ffffff)
22324 as_bad_where (fixP->fx_file, fixP->fx_line,
22325 _("invalid swi expression"));
22326 newval = md_chars_to_number (buf, INSN_SIZE);
22327 newval |= value;
22328 md_number_to_chars (buf, newval, INSN_SIZE);
22329 }
22330 break;
22331
22332 case BFD_RELOC_ARM_MULTI:
22333 if (((unsigned long) value) > 0xffff)
22334 as_bad_where (fixP->fx_file, fixP->fx_line,
22335 _("invalid expression in load/store multiple"));
22336 newval = value | md_chars_to_number (buf, INSN_SIZE);
22337 md_number_to_chars (buf, newval, INSN_SIZE);
22338 break;
22339
22340 #ifdef OBJ_ELF
22341 case BFD_RELOC_ARM_PCREL_CALL:
22342
22343 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22344 && fixP->fx_addsy
22345 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22346 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22347 && THUMB_IS_FUNC (fixP->fx_addsy))
22348 /* Flip the bl to blx. This is a simple flip
22349 bit here because we generate PCREL_CALL for
22350 unconditional bls. */
22351 {
22352 newval = md_chars_to_number (buf, INSN_SIZE);
22353 newval = newval | 0x10000000;
22354 md_number_to_chars (buf, newval, INSN_SIZE);
22355 temp = 1;
22356 fixP->fx_done = 1;
22357 }
22358 else
22359 temp = 3;
22360 goto arm_branch_common;
22361
22362 case BFD_RELOC_ARM_PCREL_JUMP:
22363 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22364 && fixP->fx_addsy
22365 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22366 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22367 && THUMB_IS_FUNC (fixP->fx_addsy))
22368 {
22369 /* This would map to a bl<cond>, b<cond>,
22370 b<always> to a Thumb function. We
22371 need to force a relocation for this particular
22372 case. */
22373 newval = md_chars_to_number (buf, INSN_SIZE);
22374 fixP->fx_done = 0;
22375 }
22376
22377 case BFD_RELOC_ARM_PLT32:
22378 #endif
22379 case BFD_RELOC_ARM_PCREL_BRANCH:
22380 temp = 3;
22381 goto arm_branch_common;
22382
22383 case BFD_RELOC_ARM_PCREL_BLX:
22384
22385 temp = 1;
22386 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22387 && fixP->fx_addsy
22388 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22389 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22390 && ARM_IS_FUNC (fixP->fx_addsy))
22391 {
22392 /* Flip the blx to a bl and warn. */
22393 const char *name = S_GET_NAME (fixP->fx_addsy);
22394 newval = 0xeb000000;
22395 as_warn_where (fixP->fx_file, fixP->fx_line,
22396 _("blx to '%s' an ARM ISA state function changed to bl"),
22397 name);
22398 md_number_to_chars (buf, newval, INSN_SIZE);
22399 temp = 3;
22400 fixP->fx_done = 1;
22401 }
22402
22403 #ifdef OBJ_ELF
22404 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22405 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22406 #endif
22407
22408 arm_branch_common:
22409 /* We are going to store value (shifted right by two) in the
22410 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22411 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
       also be clear.  */
22413 if (value & temp)
22414 as_bad_where (fixP->fx_file, fixP->fx_line,
22415 _("misaligned branch destination"));
22416 if ((value & (offsetT)0xfe000000) != (offsetT)0
22417 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22418 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22419
22420 if (fixP->fx_done || !seg->use_rela_p)
22421 {
22422 newval = md_chars_to_number (buf, INSN_SIZE);
22423 newval |= (value >> 2) & 0x00ffffff;
22424 /* Set the H bit on BLX instructions. */
22425 if (temp == 1)
22426 {
22427 if (value & 2)
22428 newval |= 0x01000000;
22429 else
22430 newval &= ~0x01000000;
22431 }
22432 md_number_to_chars (buf, newval, INSN_SIZE);
22433 }
22434 break;
22435
22436 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22437 /* CBZ can only branch forward. */
22438
22439 /* Attempts to use CBZ to branch to the next instruction
22440 (which, strictly speaking, are prohibited) will be turned into
22441 no-ops.
22442
22443 FIXME: It may be better to remove the instruction completely and
22444 perform relaxation. */
22445 if (value == -2)
22446 {
22447 newval = md_chars_to_number (buf, THUMB_SIZE);
22448 newval = 0xbf00; /* NOP encoding T1 */
22449 md_number_to_chars (buf, newval, THUMB_SIZE);
22450 }
22451 else
22452 {
22453 if (value & ~0x7e)
22454 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22455
22456 if (fixP->fx_done || !seg->use_rela_p)
22457 {
22458 newval = md_chars_to_number (buf, THUMB_SIZE);
22459 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22460 md_number_to_chars (buf, newval, THUMB_SIZE);
22461 }
22462 }
22463 break;
22464
22465 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22466 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22467 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22468
22469 if (fixP->fx_done || !seg->use_rela_p)
22470 {
22471 newval = md_chars_to_number (buf, THUMB_SIZE);
22472 newval |= (value & 0x1ff) >> 1;
22473 md_number_to_chars (buf, newval, THUMB_SIZE);
22474 }
22475 break;
22476
22477 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22478 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22479 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22480
22481 if (fixP->fx_done || !seg->use_rela_p)
22482 {
22483 newval = md_chars_to_number (buf, THUMB_SIZE);
22484 newval |= (value & 0xfff) >> 1;
22485 md_number_to_chars (buf, newval, THUMB_SIZE);
22486 }
22487 break;
22488
22489 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22490 if (fixP->fx_addsy
22491 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22492 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22493 && ARM_IS_FUNC (fixP->fx_addsy)
22494 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22495 {
22496 /* Force a relocation for a branch 20 bits wide. */
22497 fixP->fx_done = 0;
22498 }
22499 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22500 as_bad_where (fixP->fx_file, fixP->fx_line,
22501 _("conditional branch out of range"));
22502
22503 if (fixP->fx_done || !seg->use_rela_p)
22504 {
22505 offsetT newval2;
22506 addressT S, J1, J2, lo, hi;
22507
22508 S = (value & 0x00100000) >> 20;
22509 J2 = (value & 0x00080000) >> 19;
22510 J1 = (value & 0x00040000) >> 18;
22511 hi = (value & 0x0003f000) >> 12;
22512 lo = (value & 0x00000ffe) >> 1;
22513
22514 newval = md_chars_to_number (buf, THUMB_SIZE);
22515 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22516 newval |= (S << 10) | hi;
22517 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22518 md_number_to_chars (buf, newval, THUMB_SIZE);
22519 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22520 }
22521 break;
22522
22523 case BFD_RELOC_THUMB_PCREL_BLX:
22524 /* If there is a blx from a thumb state function to
22525 another thumb function flip this to a bl and warn
22526 about it. */
22527
22528 if (fixP->fx_addsy
22529 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22530 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22531 && THUMB_IS_FUNC (fixP->fx_addsy))
22532 {
22533 const char *name = S_GET_NAME (fixP->fx_addsy);
22534 as_warn_where (fixP->fx_file, fixP->fx_line,
22535 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22536 name);
22537 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22538 newval = newval | 0x1000;
22539 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22540 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22541 fixP->fx_done = 1;
22542 }
22543
22544
22545 goto thumb_bl_common;
22546
22547 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22548 /* A bl from Thumb state ISA to an internal ARM state function
22549 is converted to a blx. */
22550 if (fixP->fx_addsy
22551 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22552 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22553 && ARM_IS_FUNC (fixP->fx_addsy)
22554 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22555 {
22556 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22557 newval = newval & ~0x1000;
22558 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22559 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22560 fixP->fx_done = 1;
22561 }
22562
22563 thumb_bl_common:
22564
22565 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22566 /* For a BLX instruction, make sure that the relocation is rounded up
22567 to a word boundary. This follows the semantics of the instruction
22568 which specifies that bit 1 of the target address will come from bit
22569 1 of the base address. */
22570 value = (value + 3) & ~ 3;
22571
22572 #ifdef OBJ_ELF
22573 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22574 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22575 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22576 #endif
22577
22578 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22579 {
22580 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22581 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22582 else if ((value & ~0x1ffffff)
22583 && ((value & ~0x1ffffff) != ~0x1ffffff))
22584 as_bad_where (fixP->fx_file, fixP->fx_line,
22585 _("Thumb2 branch out of range"));
22586 }
22587
22588 if (fixP->fx_done || !seg->use_rela_p)
22589 encode_thumb2_b_bl_offset (buf, value);
22590
22591 break;
22592
22593 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22594 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22595 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22596
22597 if (fixP->fx_done || !seg->use_rela_p)
22598 encode_thumb2_b_bl_offset (buf, value);
22599
22600 break;
22601
22602 case BFD_RELOC_8:
22603 if (fixP->fx_done || !seg->use_rela_p)
22604 *buf = value;
22605 break;
22606
22607 case BFD_RELOC_16:
22608 if (fixP->fx_done || !seg->use_rela_p)
22609 md_number_to_chars (buf, value, 2);
22610 break;
22611
22612 #ifdef OBJ_ELF
22613 case BFD_RELOC_ARM_TLS_CALL:
22614 case BFD_RELOC_ARM_THM_TLS_CALL:
22615 case BFD_RELOC_ARM_TLS_DESCSEQ:
22616 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22617 case BFD_RELOC_ARM_TLS_GOTDESC:
22618 case BFD_RELOC_ARM_TLS_GD32:
22619 case BFD_RELOC_ARM_TLS_LE32:
22620 case BFD_RELOC_ARM_TLS_IE32:
22621 case BFD_RELOC_ARM_TLS_LDM32:
22622 case BFD_RELOC_ARM_TLS_LDO32:
22623 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22624 break;
22625
22626 case BFD_RELOC_ARM_GOT32:
22627 case BFD_RELOC_ARM_GOTOFF:
22628 break;
22629
22630 case BFD_RELOC_ARM_GOT_PREL:
22631 if (fixP->fx_done || !seg->use_rela_p)
22632 md_number_to_chars (buf, value, 4);
22633 break;
22634
22635 case BFD_RELOC_ARM_TARGET2:
22636 /* TARGET2 is not partial-inplace, so we need to write the
22637 addend here for REL targets, because it won't be written out
22638 during reloc processing later. */
22639 if (fixP->fx_done || !seg->use_rela_p)
22640 md_number_to_chars (buf, fixP->fx_offset, 4);
22641 break;
22642 #endif
22643
22644 case BFD_RELOC_RVA:
22645 case BFD_RELOC_32:
22646 case BFD_RELOC_ARM_TARGET1:
22647 case BFD_RELOC_ARM_ROSEGREL32:
22648 case BFD_RELOC_ARM_SBREL32:
22649 case BFD_RELOC_32_PCREL:
22650 #ifdef TE_PE
22651 case BFD_RELOC_32_SECREL:
22652 #endif
22653 if (fixP->fx_done || !seg->use_rela_p)
22654 #ifdef TE_WINCE
22655 /* For WinCE we only do this for pcrel fixups. */
22656 if (fixP->fx_done || fixP->fx_pcrel)
22657 #endif
22658 md_number_to_chars (buf, value, 4);
22659 break;
22660
22661 #ifdef OBJ_ELF
22662 case BFD_RELOC_ARM_PREL31:
22663 if (fixP->fx_done || !seg->use_rela_p)
22664 {
22665 newval = md_chars_to_number (buf, 4) & 0x80000000;
22666 if ((value ^ (value >> 1)) & 0x40000000)
22667 {
22668 as_bad_where (fixP->fx_file, fixP->fx_line,
22669 _("rel31 relocation overflow"));
22670 }
22671 newval |= value & 0x7fffffff;
22672 md_number_to_chars (buf, newval, 4);
22673 }
22674 break;
22675 #endif
22676
22677 case BFD_RELOC_ARM_CP_OFF_IMM:
22678 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22679 if (value < -1023 || value > 1023 || (value & 3))
22680 as_bad_where (fixP->fx_file, fixP->fx_line,
22681 _("co-processor offset out of range"));
22682 cp_off_common:
22683 sign = value > 0;
22684 if (value < 0)
22685 value = -value;
22686 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22687 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22688 newval = md_chars_to_number (buf, INSN_SIZE);
22689 else
22690 newval = get_thumb32_insn (buf);
22691 if (value == 0)
22692 newval &= 0xffffff00;
22693 else
22694 {
22695 newval &= 0xff7fff00;
22696 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22697 }
22698 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22699 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22700 md_number_to_chars (buf, newval, INSN_SIZE);
22701 else
22702 put_thumb32_insn (buf, newval);
22703 break;
22704
22705 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22706 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22707 if (value < -255 || value > 255)
22708 as_bad_where (fixP->fx_file, fixP->fx_line,
22709 _("co-processor offset out of range"));
22710 value *= 4;
22711 goto cp_off_common;
22712
22713 case BFD_RELOC_ARM_THUMB_OFFSET:
22714 newval = md_chars_to_number (buf, THUMB_SIZE);
22715 /* Exactly what ranges, and where the offset is inserted depends
22716 on the type of instruction, we can establish this from the
22717 top 4 bits. */
22718 switch (newval >> 12)
22719 {
22720 case 4: /* PC load. */
22721 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22722 forced to zero for these loads; md_pcrel_from has already
22723 compensated for this. */
22724 if (value & 3)
22725 as_bad_where (fixP->fx_file, fixP->fx_line,
22726 _("invalid offset, target not word aligned (0x%08lX)"),
22727 (((unsigned long) fixP->fx_frag->fr_address
22728 + (unsigned long) fixP->fx_where) & ~3)
22729 + (unsigned long) value);
22730
22731 if (value & ~0x3fc)
22732 as_bad_where (fixP->fx_file, fixP->fx_line,
22733 _("invalid offset, value too big (0x%08lX)"),
22734 (long) value);
22735
22736 newval |= value >> 2;
22737 break;
22738
22739 case 9: /* SP load/store. */
22740 if (value & ~0x3fc)
22741 as_bad_where (fixP->fx_file, fixP->fx_line,
22742 _("invalid offset, value too big (0x%08lX)"),
22743 (long) value);
22744 newval |= value >> 2;
22745 break;
22746
22747 case 6: /* Word load/store. */
22748 if (value & ~0x7c)
22749 as_bad_where (fixP->fx_file, fixP->fx_line,
22750 _("invalid offset, value too big (0x%08lX)"),
22751 (long) value);
22752 newval |= value << 4; /* 6 - 2. */
22753 break;
22754
22755 case 7: /* Byte load/store. */
22756 if (value & ~0x1f)
22757 as_bad_where (fixP->fx_file, fixP->fx_line,
22758 _("invalid offset, value too big (0x%08lX)"),
22759 (long) value);
22760 newval |= value << 6;
22761 break;
22762
22763 case 8: /* Halfword load/store. */
22764 if (value & ~0x3e)
22765 as_bad_where (fixP->fx_file, fixP->fx_line,
22766 _("invalid offset, value too big (0x%08lX)"),
22767 (long) value);
22768 newval |= value << 5; /* 6 - 1. */
22769 break;
22770
22771 default:
22772 as_bad_where (fixP->fx_file, fixP->fx_line,
22773 "Unable to process relocation for thumb opcode: %lx",
22774 (unsigned long) newval);
22775 break;
22776 }
22777 md_number_to_chars (buf, newval, THUMB_SIZE);
22778 break;
22779
22780 case BFD_RELOC_ARM_THUMB_ADD:
22781 /* This is a complicated relocation, since we use it for all of
22782 the following immediate relocations:
22783
22784 3bit ADD/SUB
22785 8bit ADD/SUB
22786 9bit ADD/SUB SP word-aligned
22787 10bit ADD PC/SP word-aligned
22788
22789 The type of instruction being processed is encoded in the
22790 instruction field:
22791
22792 0x8000 SUB
22793 0x00F0 Rd
22794 0x000F Rs
22795 */
22796 newval = md_chars_to_number (buf, THUMB_SIZE);
22797 {
22798 int rd = (newval >> 4) & 0xf;
22799 int rs = newval & 0xf;
22800 int subtract = !!(newval & 0x8000);
22801
22802 /* Check for HI regs, only very restricted cases allowed:
22803 Adjusting SP, and using PC or SP to get an address. */
22804 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22805 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22806 as_bad_where (fixP->fx_file, fixP->fx_line,
22807 _("invalid Hi register with immediate"));
22808
22809 /* If value is negative, choose the opposite instruction. */
22810 if (value < 0)
22811 {
22812 value = -value;
22813 subtract = !subtract;
22814 if (value < 0)
22815 as_bad_where (fixP->fx_file, fixP->fx_line,
22816 _("immediate value out of range"));
22817 }
22818
22819 if (rd == REG_SP)
22820 {
22821 if (value & ~0x1fc)
22822 as_bad_where (fixP->fx_file, fixP->fx_line,
22823 _("invalid immediate for stack address calculation"));
22824 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22825 newval |= value >> 2;
22826 }
22827 else if (rs == REG_PC || rs == REG_SP)
22828 {
22829 if (subtract || value & ~0x3fc)
22830 as_bad_where (fixP->fx_file, fixP->fx_line,
22831 _("invalid immediate for address calculation (value = 0x%08lX)"),
22832 (unsigned long) value);
22833 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22834 newval |= rd << 8;
22835 newval |= value >> 2;
22836 }
22837 else if (rs == rd)
22838 {
22839 if (value & ~0xff)
22840 as_bad_where (fixP->fx_file, fixP->fx_line,
22841 _("immediate value out of range"));
22842 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22843 newval |= (rd << 8) | value;
22844 }
22845 else
22846 {
22847 if (value & ~0x7)
22848 as_bad_where (fixP->fx_file, fixP->fx_line,
22849 _("immediate value out of range"));
22850 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22851 newval |= rd | (rs << 3) | (value << 6);
22852 }
22853 }
22854 md_number_to_chars (buf, newval, THUMB_SIZE);
22855 break;
22856
22857 case BFD_RELOC_ARM_THUMB_IMM:
22858 newval = md_chars_to_number (buf, THUMB_SIZE);
22859 if (value < 0 || value > 255)
22860 as_bad_where (fixP->fx_file, fixP->fx_line,
22861 _("invalid immediate: %ld is out of range"),
22862 (long) value);
22863 newval |= value;
22864 md_number_to_chars (buf, newval, THUMB_SIZE);
22865 break;
22866
22867 case BFD_RELOC_ARM_THUMB_SHIFT:
22868 /* 5bit shift value (0..32). LSL cannot take 32. */
22869 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22870 temp = newval & 0xf800;
22871 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22872 as_bad_where (fixP->fx_file, fixP->fx_line,
22873 _("invalid shift value: %ld"), (long) value);
22874 /* Shifts of zero must be encoded as LSL. */
22875 if (value == 0)
22876 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22877 /* Shifts of 32 are encoded as zero. */
22878 else if (value == 32)
22879 value = 0;
22880 newval |= value << 6;
22881 md_number_to_chars (buf, newval, THUMB_SIZE);
22882 break;
22883
22884 case BFD_RELOC_VTABLE_INHERIT:
22885 case BFD_RELOC_VTABLE_ENTRY:
22886 fixP->fx_done = 0;
22887 return;
22888
22889 case BFD_RELOC_ARM_MOVW:
22890 case BFD_RELOC_ARM_MOVT:
22891 case BFD_RELOC_ARM_THUMB_MOVW:
22892 case BFD_RELOC_ARM_THUMB_MOVT:
22893 if (fixP->fx_done || !seg->use_rela_p)
22894 {
22895 /* REL format relocations are limited to a 16-bit addend. */
22896 if (!fixP->fx_done)
22897 {
22898 if (value < -0x8000 || value > 0x7fff)
22899 as_bad_where (fixP->fx_file, fixP->fx_line,
22900 _("offset out of range"));
22901 }
22902 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22903 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22904 {
22905 value >>= 16;
22906 }
22907
22908 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22909 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22910 {
22911 newval = get_thumb32_insn (buf);
22912 newval &= 0xfbf08f00;
22913 newval |= (value & 0xf000) << 4;
22914 newval |= (value & 0x0800) << 15;
22915 newval |= (value & 0x0700) << 4;
22916 newval |= (value & 0x00ff);
22917 put_thumb32_insn (buf, newval);
22918 }
22919 else
22920 {
22921 newval = md_chars_to_number (buf, 4);
22922 newval &= 0xfff0f000;
22923 newval |= value & 0x0fff;
22924 newval |= (value & 0xf000) << 4;
22925 md_number_to_chars (buf, newval, 4);
22926 }
22927 }
22928 return;
22929
22930 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22931 case BFD_RELOC_ARM_ALU_PC_G0:
22932 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22933 case BFD_RELOC_ARM_ALU_PC_G1:
22934 case BFD_RELOC_ARM_ALU_PC_G2:
22935 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22936 case BFD_RELOC_ARM_ALU_SB_G0:
22937 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22938 case BFD_RELOC_ARM_ALU_SB_G1:
22939 case BFD_RELOC_ARM_ALU_SB_G2:
22940 gas_assert (!fixP->fx_done);
22941 if (!seg->use_rela_p)
22942 {
22943 bfd_vma insn;
22944 bfd_vma encoded_addend;
22945 bfd_vma addend_abs = abs (value);
22946
22947 /* Check that the absolute value of the addend can be
22948 expressed as an 8-bit constant plus a rotation. */
22949 encoded_addend = encode_arm_immediate (addend_abs);
22950 if (encoded_addend == (unsigned int) FAIL)
22951 as_bad_where (fixP->fx_file, fixP->fx_line,
22952 _("the offset 0x%08lX is not representable"),
22953 (unsigned long) addend_abs);
22954
22955 /* Extract the instruction. */
22956 insn = md_chars_to_number (buf, INSN_SIZE);
22957
22958 /* If the addend is positive, use an ADD instruction.
22959 Otherwise use a SUB. Take care not to destroy the S bit. */
22960 insn &= 0xff1fffff;
22961 if (value < 0)
22962 insn |= 1 << 22;
22963 else
22964 insn |= 1 << 23;
22965
22966 /* Place the encoded addend into the first 12 bits of the
22967 instruction. */
22968 insn &= 0xfffff000;
22969 insn |= encoded_addend;
22970
22971 /* Update the instruction. */
22972 md_number_to_chars (buf, insn, INSN_SIZE);
22973 }
22974 break;
22975
22976 case BFD_RELOC_ARM_LDR_PC_G0:
22977 case BFD_RELOC_ARM_LDR_PC_G1:
22978 case BFD_RELOC_ARM_LDR_PC_G2:
22979 case BFD_RELOC_ARM_LDR_SB_G0:
22980 case BFD_RELOC_ARM_LDR_SB_G1:
22981 case BFD_RELOC_ARM_LDR_SB_G2:
22982 gas_assert (!fixP->fx_done);
22983 if (!seg->use_rela_p)
22984 {
22985 bfd_vma insn;
22986 bfd_vma addend_abs = abs (value);
22987
22988 /* Check that the absolute value of the addend can be
22989 encoded in 12 bits. */
22990 if (addend_abs >= 0x1000)
22991 as_bad_where (fixP->fx_file, fixP->fx_line,
22992 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22993 (unsigned long) addend_abs);
22994
22995 /* Extract the instruction. */
22996 insn = md_chars_to_number (buf, INSN_SIZE);
22997
22998 /* If the addend is negative, clear bit 23 of the instruction.
22999 Otherwise set it. */
23000 if (value < 0)
23001 insn &= ~(1 << 23);
23002 else
23003 insn |= 1 << 23;
23004
23005 /* Place the absolute value of the addend into the first 12 bits
23006 of the instruction. */
23007 insn &= 0xfffff000;
23008 insn |= addend_abs;
23009
23010 /* Update the instruction. */
23011 md_number_to_chars (buf, insn, INSN_SIZE);
23012 }
23013 break;
23014
23015 case BFD_RELOC_ARM_LDRS_PC_G0:
23016 case BFD_RELOC_ARM_LDRS_PC_G1:
23017 case BFD_RELOC_ARM_LDRS_PC_G2:
23018 case BFD_RELOC_ARM_LDRS_SB_G0:
23019 case BFD_RELOC_ARM_LDRS_SB_G1:
23020 case BFD_RELOC_ARM_LDRS_SB_G2:
23021 gas_assert (!fixP->fx_done);
23022 if (!seg->use_rela_p)
23023 {
23024 bfd_vma insn;
23025 bfd_vma addend_abs = abs (value);
23026
23027 /* Check that the absolute value of the addend can be
23028 encoded in 8 bits. */
23029 if (addend_abs >= 0x100)
23030 as_bad_where (fixP->fx_file, fixP->fx_line,
23031 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23032 (unsigned long) addend_abs);
23033
23034 /* Extract the instruction. */
23035 insn = md_chars_to_number (buf, INSN_SIZE);
23036
23037 /* If the addend is negative, clear bit 23 of the instruction.
23038 Otherwise set it. */
23039 if (value < 0)
23040 insn &= ~(1 << 23);
23041 else
23042 insn |= 1 << 23;
23043
23044 /* Place the first four bits of the absolute value of the addend
23045 into the first 4 bits of the instruction, and the remaining
23046 four into bits 8 .. 11. */
23047 insn &= 0xfffff0f0;
23048 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23049
23050 /* Update the instruction. */
23051 md_number_to_chars (buf, insn, INSN_SIZE);
23052 }
23053 break;
23054
23055 case BFD_RELOC_ARM_LDC_PC_G0:
23056 case BFD_RELOC_ARM_LDC_PC_G1:
23057 case BFD_RELOC_ARM_LDC_PC_G2:
23058 case BFD_RELOC_ARM_LDC_SB_G0:
23059 case BFD_RELOC_ARM_LDC_SB_G1:
23060 case BFD_RELOC_ARM_LDC_SB_G2:
23061 gas_assert (!fixP->fx_done);
23062 if (!seg->use_rela_p)
23063 {
23064 bfd_vma insn;
23065 bfd_vma addend_abs = abs (value);
23066
23067 /* Check that the absolute value of the addend is a multiple of
23068 four and, when divided by four, fits in 8 bits. */
23069 if (addend_abs & 0x3)
23070 as_bad_where (fixP->fx_file, fixP->fx_line,
23071 _("bad offset 0x%08lX (must be word-aligned)"),
23072 (unsigned long) addend_abs);
23073
23074 if ((addend_abs >> 2) > 0xff)
23075 as_bad_where (fixP->fx_file, fixP->fx_line,
23076 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23077 (unsigned long) addend_abs);
23078
23079 /* Extract the instruction. */
23080 insn = md_chars_to_number (buf, INSN_SIZE);
23081
23082 /* If the addend is negative, clear bit 23 of the instruction.
23083 Otherwise set it. */
23084 if (value < 0)
23085 insn &= ~(1 << 23);
23086 else
23087 insn |= 1 << 23;
23088
23089 /* Place the addend (divided by four) into the first eight
23090 bits of the instruction. */
23091 insn &= 0xfffffff0;
23092 insn |= addend_abs >> 2;
23093
23094 /* Update the instruction. */
23095 md_number_to_chars (buf, insn, INSN_SIZE);
23096 }
23097 break;
23098
23099 case BFD_RELOC_ARM_V4BX:
23100 /* This will need to go in the object file. */
23101 fixP->fx_done = 0;
23102 break;
23103
23104 case BFD_RELOC_UNUSED:
23105 default:
23106 as_bad_where (fixP->fx_file, fixP->fx_line,
23107 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23108 }
23109 }
23110
23111 /* Translate internal representation of relocation info to BFD target
23112 format. */
23113
23114 arelent *
23115 tc_gen_reloc (asection *section, fixS *fixp)
23116 {
23117 arelent * reloc;
23118 bfd_reloc_code_real_type code;
23119
23120 reloc = (arelent *) xmalloc (sizeof (arelent));
23121
23122 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
23123 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
23124 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
23125
23126 if (fixp->fx_pcrel)
23127 {
23128 if (section->use_rela_p)
23129 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
23130 else
23131 fixp->fx_offset = reloc->address;
23132 }
23133 reloc->addend = fixp->fx_offset;
23134
23135 switch (fixp->fx_r_type)
23136 {
23137 case BFD_RELOC_8:
23138 if (fixp->fx_pcrel)
23139 {
23140 code = BFD_RELOC_8_PCREL;
23141 break;
23142 }
23143
23144 case BFD_RELOC_16:
23145 if (fixp->fx_pcrel)
23146 {
23147 code = BFD_RELOC_16_PCREL;
23148 break;
23149 }
23150
23151 case BFD_RELOC_32:
23152 if (fixp->fx_pcrel)
23153 {
23154 code = BFD_RELOC_32_PCREL;
23155 break;
23156 }
23157
23158 case BFD_RELOC_ARM_MOVW:
23159 if (fixp->fx_pcrel)
23160 {
23161 code = BFD_RELOC_ARM_MOVW_PCREL;
23162 break;
23163 }
23164
23165 case BFD_RELOC_ARM_MOVT:
23166 if (fixp->fx_pcrel)
23167 {
23168 code = BFD_RELOC_ARM_MOVT_PCREL;
23169 break;
23170 }
23171
23172 case BFD_RELOC_ARM_THUMB_MOVW:
23173 if (fixp->fx_pcrel)
23174 {
23175 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
23176 break;
23177 }
23178
23179 case BFD_RELOC_ARM_THUMB_MOVT:
23180 if (fixp->fx_pcrel)
23181 {
23182 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
23183 break;
23184 }
23185
23186 case BFD_RELOC_NONE:
23187 case BFD_RELOC_ARM_PCREL_BRANCH:
23188 case BFD_RELOC_ARM_PCREL_BLX:
23189 case BFD_RELOC_RVA:
23190 case BFD_RELOC_THUMB_PCREL_BRANCH7:
23191 case BFD_RELOC_THUMB_PCREL_BRANCH9:
23192 case BFD_RELOC_THUMB_PCREL_BRANCH12:
23193 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23194 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23195 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23196 case BFD_RELOC_VTABLE_ENTRY:
23197 case BFD_RELOC_VTABLE_INHERIT:
23198 #ifdef TE_PE
23199 case BFD_RELOC_32_SECREL:
23200 #endif
23201 code = fixp->fx_r_type;
23202 break;
23203
23204 case BFD_RELOC_THUMB_PCREL_BLX:
23205 #ifdef OBJ_ELF
23206 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23207 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
23208 else
23209 #endif
23210 code = BFD_RELOC_THUMB_PCREL_BLX;
23211 break;
23212
23213 case BFD_RELOC_ARM_LITERAL:
23214 case BFD_RELOC_ARM_HWLITERAL:
23215 /* If this is called then the a literal has
23216 been referenced across a section boundary. */
23217 as_bad_where (fixp->fx_file, fixp->fx_line,
23218 _("literal referenced across section boundary"));
23219 return NULL;
23220
23221 #ifdef OBJ_ELF
23222 case BFD_RELOC_ARM_TLS_CALL:
23223 case BFD_RELOC_ARM_THM_TLS_CALL:
23224 case BFD_RELOC_ARM_TLS_DESCSEQ:
23225 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23226 case BFD_RELOC_ARM_GOT32:
23227 case BFD_RELOC_ARM_GOTOFF:
23228 case BFD_RELOC_ARM_GOT_PREL:
23229 case BFD_RELOC_ARM_PLT32:
23230 case BFD_RELOC_ARM_TARGET1:
23231 case BFD_RELOC_ARM_ROSEGREL32:
23232 case BFD_RELOC_ARM_SBREL32:
23233 case BFD_RELOC_ARM_PREL31:
23234 case BFD_RELOC_ARM_TARGET2:
23235 case BFD_RELOC_ARM_TLS_LE32:
23236 case BFD_RELOC_ARM_TLS_LDO32:
23237 case BFD_RELOC_ARM_PCREL_CALL:
23238 case BFD_RELOC_ARM_PCREL_JUMP:
23239 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23240 case BFD_RELOC_ARM_ALU_PC_G0:
23241 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23242 case BFD_RELOC_ARM_ALU_PC_G1:
23243 case BFD_RELOC_ARM_ALU_PC_G2:
23244 case BFD_RELOC_ARM_LDR_PC_G0:
23245 case BFD_RELOC_ARM_LDR_PC_G1:
23246 case BFD_RELOC_ARM_LDR_PC_G2:
23247 case BFD_RELOC_ARM_LDRS_PC_G0:
23248 case BFD_RELOC_ARM_LDRS_PC_G1:
23249 case BFD_RELOC_ARM_LDRS_PC_G2:
23250 case BFD_RELOC_ARM_LDC_PC_G0:
23251 case BFD_RELOC_ARM_LDC_PC_G1:
23252 case BFD_RELOC_ARM_LDC_PC_G2:
23253 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23254 case BFD_RELOC_ARM_ALU_SB_G0:
23255 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23256 case BFD_RELOC_ARM_ALU_SB_G1:
23257 case BFD_RELOC_ARM_ALU_SB_G2:
23258 case BFD_RELOC_ARM_LDR_SB_G0:
23259 case BFD_RELOC_ARM_LDR_SB_G1:
23260 case BFD_RELOC_ARM_LDR_SB_G2:
23261 case BFD_RELOC_ARM_LDRS_SB_G0:
23262 case BFD_RELOC_ARM_LDRS_SB_G1:
23263 case BFD_RELOC_ARM_LDRS_SB_G2:
23264 case BFD_RELOC_ARM_LDC_SB_G0:
23265 case BFD_RELOC_ARM_LDC_SB_G1:
23266 case BFD_RELOC_ARM_LDC_SB_G2:
23267 case BFD_RELOC_ARM_V4BX:
23268 code = fixp->fx_r_type;
23269 break;
23270
23271 case BFD_RELOC_ARM_TLS_GOTDESC:
23272 case BFD_RELOC_ARM_TLS_GD32:
23273 case BFD_RELOC_ARM_TLS_IE32:
23274 case BFD_RELOC_ARM_TLS_LDM32:
23275 /* BFD will include the symbol's address in the addend.
23276 But we don't want that, so subtract it out again here. */
23277 if (!S_IS_COMMON (fixp->fx_addsy))
23278 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
23279 code = fixp->fx_r_type;
23280 break;
23281 #endif
23282
23283 case BFD_RELOC_ARM_IMMEDIATE:
23284 as_bad_where (fixp->fx_file, fixp->fx_line,
23285 _("internal relocation (type: IMMEDIATE) not fixed up"));
23286 return NULL;
23287
23288 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23289 as_bad_where (fixp->fx_file, fixp->fx_line,
23290 _("ADRL used for a symbol not defined in the same file"));
23291 return NULL;
23292
23293 case BFD_RELOC_ARM_OFFSET_IMM:
23294 if (section->use_rela_p)
23295 {
23296 code = fixp->fx_r_type;
23297 break;
23298 }
23299
23300 if (fixp->fx_addsy != NULL
23301 && !S_IS_DEFINED (fixp->fx_addsy)
23302 && S_IS_LOCAL (fixp->fx_addsy))
23303 {
23304 as_bad_where (fixp->fx_file, fixp->fx_line,
23305 _("undefined local label `%s'"),
23306 S_GET_NAME (fixp->fx_addsy));
23307 return NULL;
23308 }
23309
23310 as_bad_where (fixp->fx_file, fixp->fx_line,
23311 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23312 return NULL;
23313
23314 default:
23315 {
23316 char * type;
23317
23318 switch (fixp->fx_r_type)
23319 {
23320 case BFD_RELOC_NONE: type = "NONE"; break;
23321 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
23322 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
23323 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
23324 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
23325 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
23326 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
23327 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
23328 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
23329 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
23330 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
23331 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
23332 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
23333 default: type = _("<unknown>"); break;
23334 }
23335 as_bad_where (fixp->fx_file, fixp->fx_line,
23336 _("cannot represent %s relocation in this object file format"),
23337 type);
23338 return NULL;
23339 }
23340 }
23341
23342 #ifdef OBJ_ELF
23343 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
23344 && GOT_symbol
23345 && fixp->fx_addsy == GOT_symbol)
23346 {
23347 code = BFD_RELOC_ARM_GOTPC;
23348 reloc->addend = fixp->fx_offset = reloc->address;
23349 }
23350 #endif
23351
23352 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
23353
23354 if (reloc->howto == NULL)
23355 {
23356 as_bad_where (fixp->fx_file, fixp->fx_line,
23357 _("cannot represent %s relocation in this object file format"),
23358 bfd_get_reloc_code_name (code));
23359 return NULL;
23360 }
23361
23362 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23363 vtable entry to be used in the relocation's section offset. */
23364 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23365 reloc->address = fixp->fx_offset;
23366
23367 return reloc;
23368 }
23369
23370 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23371
23372 void
23373 cons_fix_new_arm (fragS * frag,
23374 int where,
23375 int size,
23376 expressionS * exp,
23377 bfd_reloc_code_real_type reloc)
23378 {
23379 int pcrel = 0;
23380
23381 /* Pick a reloc.
23382 FIXME: @@ Should look at CPU word size. */
23383 switch (size)
23384 {
23385 case 1:
23386 reloc = BFD_RELOC_8;
23387 break;
23388 case 2:
23389 reloc = BFD_RELOC_16;
23390 break;
23391 case 4:
23392 default:
23393 reloc = BFD_RELOC_32;
23394 break;
23395 case 8:
23396 reloc = BFD_RELOC_64;
23397 break;
23398 }
23399
23400 #ifdef TE_PE
23401 if (exp->X_op == O_secrel)
23402 {
23403 exp->X_op = O_symbol;
23404 reloc = BFD_RELOC_32_SECREL;
23405 }
23406 #endif
23407
23408 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23409 }
23410
23411 #if defined (OBJ_COFF)
23412 void
23413 arm_validate_fix (fixS * fixP)
23414 {
23415 /* If the destination of the branch is a defined symbol which does not have
23416 the THUMB_FUNC attribute, then we must be calling a function which has
23417 the (interfacearm) attribute. We look for the Thumb entry point to that
23418 function and change the branch to refer to that function instead. */
23419 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23420 && fixP->fx_addsy != NULL
23421 && S_IS_DEFINED (fixP->fx_addsy)
23422 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23423 {
23424 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23425 }
23426 }
23427 #endif
23428
23429
23430 int
23431 arm_force_relocation (struct fix * fixp)
23432 {
23433 #if defined (OBJ_COFF) && defined (TE_PE)
23434 if (fixp->fx_r_type == BFD_RELOC_RVA)
23435 return 1;
23436 #endif
23437
23438 /* In case we have a call or a branch to a function in ARM ISA mode from
23439 a thumb function or vice-versa force the relocation. These relocations
23440 are cleared off for some cores that might have blx and simple transformations
23441 are possible. */
23442
23443 #ifdef OBJ_ELF
23444 switch (fixp->fx_r_type)
23445 {
23446 case BFD_RELOC_ARM_PCREL_JUMP:
23447 case BFD_RELOC_ARM_PCREL_CALL:
23448 case BFD_RELOC_THUMB_PCREL_BLX:
23449 if (THUMB_IS_FUNC (fixp->fx_addsy))
23450 return 1;
23451 break;
23452
23453 case BFD_RELOC_ARM_PCREL_BLX:
23454 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23455 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23456 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23457 if (ARM_IS_FUNC (fixp->fx_addsy))
23458 return 1;
23459 break;
23460
23461 default:
23462 break;
23463 }
23464 #endif
23465
23466 /* Resolve these relocations even if the symbol is extern or weak.
23467 Technically this is probably wrong due to symbol preemption.
23468 In practice these relocations do not have enough range to be useful
23469 at dynamic link time, and some code (e.g. in the Linux kernel)
23470 expects these references to be resolved. */
23471 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23472 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23473 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23474 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23475 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23476 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23477 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23478 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23479 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23480 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23481 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23482 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23483 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23484 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23485 return 0;
23486
23487 /* Always leave these relocations for the linker. */
23488 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23489 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23490 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23491 return 1;
23492
23493 /* Always generate relocations against function symbols. */
23494 if (fixp->fx_r_type == BFD_RELOC_32
23495 && fixp->fx_addsy
23496 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23497 return 1;
23498
23499 return generic_force_reloc (fixp);
23500 }
23501
23502 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23503 /* Relocations against function names must be left unadjusted,
23504 so that the linker can use this information to generate interworking
23505 stubs. The MIPS version of this function
23506 also prevents relocations that are mips-16 specific, but I do not
23507 know why it does this.
23508
23509 FIXME:
23510 There is one other problem that ought to be addressed here, but
23511 which currently is not: Taking the address of a label (rather
23512 than a function) and then later jumping to that address. Such
23513 addresses also ought to have their bottom bit set (assuming that
23514 they reside in Thumb code), but at the moment they will not. */
23515
23516 bfd_boolean
23517 arm_fix_adjustable (fixS * fixP)
23518 {
23519 if (fixP->fx_addsy == NULL)
23520 return 1;
23521
23522 /* Preserve relocations against symbols with function type. */
23523 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23524 return FALSE;
23525
23526 if (THUMB_IS_FUNC (fixP->fx_addsy)
23527 && fixP->fx_subsy == NULL)
23528 return FALSE;
23529
23530 /* We need the symbol name for the VTABLE entries. */
23531 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23532 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23533 return FALSE;
23534
23535 /* Don't allow symbols to be discarded on GOT related relocs. */
23536 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23537 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23538 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23539 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23540 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23541 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23542 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23543 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23544 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23545 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23546 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23547 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23548 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23549 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23550 return FALSE;
23551
23552 /* Similarly for group relocations. */
23553 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23554 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23555 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23556 return FALSE;
23557
23558 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23559 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23560 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23561 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23562 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23563 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23564 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23565 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23566 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23567 return FALSE;
23568
23569 return TRUE;
23570 }
23571 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23572
23573 #ifdef OBJ_ELF
23574
23575 const char *
23576 elf32_arm_target_format (void)
23577 {
23578 #ifdef TE_SYMBIAN
23579 return (target_big_endian
23580 ? "elf32-bigarm-symbian"
23581 : "elf32-littlearm-symbian");
23582 #elif defined (TE_VXWORKS)
23583 return (target_big_endian
23584 ? "elf32-bigarm-vxworks"
23585 : "elf32-littlearm-vxworks");
23586 #elif defined (TE_NACL)
23587 return (target_big_endian
23588 ? "elf32-bigarm-nacl"
23589 : "elf32-littlearm-nacl");
23590 #else
23591 if (target_big_endian)
23592 return "elf32-bigarm";
23593 else
23594 return "elf32-littlearm";
23595 #endif
23596 }
23597
/* Per-symbol output hook for ELF targets: delegate straight to the
   generic ELF symbol frobber (SYMP and PUNTP are passed through
   unchanged).  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
23604 #endif
23605
23606 /* MD interface: Finalization. */
23607
23608 void
23609 arm_cleanup (void)
23610 {
23611 literal_pool * pool;
23612
23613 /* Ensure that all the IT blocks are properly closed. */
23614 check_it_blocks_finished ();
23615
23616 for (pool = list_of_pools; pool; pool = pool->next)
23617 {
23618 /* Put it at the end of the relevant section. */
23619 subseg_set (pool->section, pool->sub_section);
23620 #ifdef OBJ_ELF
23621 arm_elf_change_section ();
23622 #endif
23623 s_ltorg (0);
23624 }
23625 }
23626
23627 #ifdef OBJ_ELF
23628 /* Remove any excess mapping symbols generated for alignment frags in
23629 SEC. We may have created a mapping symbol before a zero byte
23630 alignment; remove it if there's a mapping symbol after the
23631 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections that never had frags assembled.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The last mapping symbol of this frag sits exactly at the start
	 of the next frag; scan forward to decide whether it is made
	 redundant by a following mapping symbol.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
23692 #endif
23693
23694 /* Adjust the symbol table. This marks Thumb symbols as distinct from
23695 ARM ones. */
23696
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* Walk the whole symbol table, rewriting the storage class of Thumb
     symbols so that the COFF output distinguishes them from ARM ones.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Record interworking support in the native COFF symbol entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23775
23776 /* MD interface: Initialization. */
23777
23778 static void
23779 set_constant_flonums (void)
23780 {
23781 int i;
23782
23783 for (i = 0; i < NUM_FLOAT_VALS; i++)
23784 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23785 abort ();
23786 }
23787
23788 /* Auto-select Thumb mode if it's the only available instruction set for the
23789 given architecture. */
23790
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the selected CPU lacks the base ARM instruction set (v1),
     Thumb is the only instruction set available, so switch to it.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
23797
/* MD interface hook: one-time target initialization.  Builds the
   lookup hash tables for mnemonics, conditions, shifts, PSR names,
   registers, relocation operators and barrier options; resolves the
   CPU/FPU selection from the command-line options; and records the
   resulting flags and machine type in the output BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the tables from the static operand/opcode arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU defaults.  Note the NULL check on mcpu_fpu_opt
     before dereferencing it; a CPU may have been selected with no
     associated FPU information.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific features are tested
     first so that e.g. an iWMMXt2 core is not reported as plain v5TE.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24022
24023 /* Command line processing. */
24024
24025 /* md_parse_option
24026 Invocation line includes a switch not recognized by the base assembler.
24027 See if it's a processor-specific option.
24028
24029 This routine is somewhat complicated by the need for backwards
24030 compatibility (since older releases of gcc can't be changed).
24031 The new options try to make the interface as compatible as
24032 possible with GCC.
24033
24034 New options (supported) are:
24035
24036 -mcpu=<cpu name> Assemble for selected processor
24037 -march=<architecture name> Assemble for selected architecture
24038 -mfpu=<fpu architecture> Assemble for selected FPU.
24039 -EB/-mbig-endian Big-endian
24040 -EL/-mlittle-endian Little-endian
24041 -k Generate PIC code
24042 -mthumb Start in Thumb mode
24043 -mthumb-interwork Code supports ARM/Thumb interworking
24044
24045 -m[no-]warn-deprecated Warn about deprecated features
24046
24047 For now we will also provide support for:
24048
24049 -mapcs-32 32-bit Program counter
24050 -mapcs-26 26-bit Program counter
   -mapcs-float		   Floats passed in FP registers
24052 -mapcs-reentrant Reentrant code
24053 -matpcs
24054 (sometime these will probably be replaced with -mapcs=<list of options>
24055 and -matpcs=<list of options>)
24056
   The remaining options are only supported for backwards compatibility.
24058 Cpu variants, the arm part is optional:
24059 -m[arm]1 Currently not supported.
24060 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24061 -m[arm]3 Arm 3 processor
24062 -m[arm]6[xx], Arm 6 processors
24063 -m[arm]7[xx][t][[d]m] Arm 7 processors
24064 -m[arm]8[10] Arm 8 processors
24065 -m[arm]9[20][tdmi] Arm 9 processors
24066 -mstrongarm[110[0]] StrongARM processors
24067 -mxscale XScale processors
24068 -m[arm]v[2345[t[e]]] Arm architectures
24069 -mall All (except the ARM1)
24070 FP variants:
24071 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24072 -mfpe-old (No float load/store multiples)
24073 -mvfpxd VFP Single precision
24074 -mvfp All VFP
24075 -mno-fpu Disable all floating point instructions
24076
24077 The following CPU names are recognized:
24078 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24079 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24080 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24081 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24082 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24083 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24084 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24085
24086 */
24087
/* Single-character options accepted by this target: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Codes for the long options.  OPTION_EB / OPTION_EL are only defined
   when the corresponding endianness can be selected for this build.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options, getopt_long style; terminated by an all-NULL entry.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
24115
/* Entry for a simple boolean-style command line option: when OPTION
   is seen, *VAR is set to VALUE.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24124
/* Table of simple command-line options.  Terminated by a NULL entry.  */
struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code,	1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
24150
/* Entry for a deprecated, legacy-style CPU/FPU option: when OPTION is
   seen, *VAR (in practice &legacy_cpu or &legacy_fpu) is pointed at
   VALUE, and DEPRECATED suggests the modern replacement.  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};
24158
/* Legacy option table; every entry is deprecated and carries the
   recommended -mcpu=/-march=/-mfpu= replacement as its message.
   Terminated by a NULL entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24271
/* Entry describing one -mcpu= argument.  */
struct arm_cpu_option_table
{
  char *name;			/* Name accepted by -mcpu=.  */
  size_t name_len;		/* strlen (name), precomputed by ARM_CPU_OPT.  */
  const arm_feature_set	value;	/* Feature set the CPU provides.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24284
24285 /* This list should, at a minimum, contain all the cpu names
24286 recognized by GCC. */
24287 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24288 static const struct arm_cpu_option_table arm_cpus[] =
24289 {
24290 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
24291 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
24292 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
24293 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24294 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24295 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24296 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24297 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24298 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24299 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24300 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24301 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24302 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24303 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24304 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24305 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24306 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24307 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24308 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24309 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24310 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24311 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24312 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24313 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24314 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24315 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24316 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24317 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24318 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24319 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24320 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24321 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24322 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24323 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24324 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24325 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24326 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24327 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24328 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24329 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
24330 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24331 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24332 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24333 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24334 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24335 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24336 /* For V5 or later processors we default to using VFP; but the user
24337 should really set the FPU type explicitly. */
24338 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24339 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24340 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24341 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24342 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24343 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24344 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
24345 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24346 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24347 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
24348 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24349 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24350 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24351 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24352 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24353 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
24354 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24355 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24356 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24357 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
24358 "ARM1026EJ-S"),
24359 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24360 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24361 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24362 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24363 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24364 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24365 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
24366 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
24367 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
24368 "ARM1136JF-S"),
24369 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
24370 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
24371 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
24372 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
24373 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
24374 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
24375 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
24376 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
24377 FPU_NONE, "Cortex-A5"),
24378 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24379 "Cortex-A7"),
24380 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
24381 ARM_FEATURE (0, FPU_VFP_V3
24382 | FPU_NEON_EXT_V1),
24383 "Cortex-A8"),
24384 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
24385 ARM_FEATURE (0, FPU_VFP_V3
24386 | FPU_NEON_EXT_V1),
24387 "Cortex-A9"),
24388 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24389 "Cortex-A12"),
24390 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24391 "Cortex-A15"),
24392 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24393 "Cortex-A53"),
24394 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24395 "Cortex-A57"),
24396 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
24397 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
24398 "Cortex-R4F"),
24399 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
24400 FPU_NONE, "Cortex-R5"),
24401 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
24402 FPU_ARCH_VFP_V3D16,
24403 "Cortex-R7"),
24404 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
24405 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
24406 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
24407 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
24408 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
24409 /* ??? XSCALE is really an architecture. */
24410 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24411 /* ??? iwmmxt is not a processor. */
24412 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24413 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24414 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24415 /* Maverick */
24416 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24417 FPU_ARCH_MAVERICK, "ARM920T"),
24418 /* Marvell processors. */
24419 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
24420 FPU_ARCH_VFP_V3D16, NULL),
24421
24422 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
24423 };
24424 #undef ARM_CPU_OPT
24425
/* An entry in the -march= command-line option table.  */
struct arm_arch_option_table
{
  char *name;				/* Architecture name, e.g. "armv7-a".  */
  size_t name_len;			/* strlen (NAME), precomputed by ARM_ARCH_OPT.  */
  const arm_feature_set value;		/* Feature bits enabled by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed when the user gives none.  */
};
24433
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Matched by arm_parse_arch; NAME_LEN is filled in
   by the macro so lookups avoid repeated strlen calls.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
24485
/* ISA extensions in the co-processor and main instruction set space.
   An entry in the -mcpu=/-march= "+extension" table.  */
struct arm_option_extension_value_table
{
  char *name;				/* Extension name, e.g. "simd".  */
  size_t name_len;			/* strlen (NAME), precomputed by ARM_EXT_OPT.  */
  const arm_feature_set value;		/* Feature bits added (or removed for "no<name>").  */
  const arm_feature_set allowed_archs;	/* Architectures the extension may be applied to.  */
};
24494
24495 /* The following table must be in alphabetical order with a NULL last entry.
24496 */
24497 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24498 static const struct arm_option_extension_value_table arm_extensions[] =
24499 {
24500 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
24501 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24502 ARM_FEATURE (ARM_EXT_V8, 0)),
24503 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
24504 ARM_FEATURE (ARM_EXT_V8, 0)),
24505 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24506 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24507 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
24508 ARM_EXT_OPT ("iwmmxt2",
24509 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
24510 ARM_EXT_OPT ("maverick",
24511 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
24512 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
24513 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24514 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
24515 ARM_FEATURE (ARM_EXT_V8, 0)),
24516 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
24517 ARM_FEATURE (ARM_EXT_V6M, 0)),
24518 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
24519 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24520 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24521 | ARM_EXT_DIV, 0),
24522 ARM_FEATURE (ARM_EXT_V7A, 0)),
24523 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
24524 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24525 };
24526 #undef ARM_EXT_OPT
24527
/* ISA floating-point and Advanced SIMD extensions.
   An entry in the -mfpu= command-line option table.  */
struct arm_option_fpu_value_table
{
  char *name;				/* FPU name, e.g. "vfpv3-d16".  */
  const arm_feature_set value;		/* Feature bits selected by this FPU.  */
};
24534
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string comparison in
   arm_parse_fpu.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {NULL,		ARM_ARCH_NONE}
};
24579
/* A generic name -> integer-value option table entry (used for the
   float ABI and EABI version options below).  */
struct arm_option_value_table
{
  char *name;		/* Option value name as given on the command line.  */
  long value;		/* Value stored when NAME matches.  */
};
24585
/* Recognized -mfloat-abi= values; matched in arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
24593
24594 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Recognized -meabi= values; matched in arm_parse_eabi.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
24603 #endif
24604
/* An entry in the table of long (multi-character) command-line options;
   each is matched by prefix and dispatched to a sub-option parser.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24612
24613 static bfd_boolean
24614 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24615 {
24616 arm_feature_set *ext_set = (arm_feature_set *)
24617 xmalloc (sizeof (arm_feature_set));
24618
24619 /* We insist on extensions being specified in alphabetical order, and with
24620 extensions being added before being removed. We achieve this by having
24621 the global ARM_EXTENSIONS table in alphabetical order, and using the
24622 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24623 or removing it (0) and only allowing it to change in the order
24624 -1 -> 1 -> 0. */
24625 const struct arm_option_extension_value_table * opt = NULL;
24626 int adding_value = -1;
24627
24628 /* Copy the feature set, so that we can modify it. */
24629 *ext_set = **opt_p;
24630 *opt_p = ext_set;
24631
24632 while (str != NULL && *str != 0)
24633 {
24634 char *ext;
24635 size_t len;
24636
24637 if (*str != '+')
24638 {
24639 as_bad (_("invalid architectural extension"));
24640 return FALSE;
24641 }
24642
24643 str++;
24644 ext = strchr (str, '+');
24645
24646 if (ext != NULL)
24647 len = ext - str;
24648 else
24649 len = strlen (str);
24650
24651 if (len >= 2 && strncmp (str, "no", 2) == 0)
24652 {
24653 if (adding_value != 0)
24654 {
24655 adding_value = 0;
24656 opt = arm_extensions;
24657 }
24658
24659 len -= 2;
24660 str += 2;
24661 }
24662 else if (len > 0)
24663 {
24664 if (adding_value == -1)
24665 {
24666 adding_value = 1;
24667 opt = arm_extensions;
24668 }
24669 else if (adding_value != 1)
24670 {
24671 as_bad (_("must specify extensions to add before specifying "
24672 "those to remove"));
24673 return FALSE;
24674 }
24675 }
24676
24677 if (len == 0)
24678 {
24679 as_bad (_("missing architectural extension"));
24680 return FALSE;
24681 }
24682
24683 gas_assert (adding_value != -1);
24684 gas_assert (opt != NULL);
24685
24686 /* Scan over the options table trying to find an exact match. */
24687 for (; opt->name != NULL; opt++)
24688 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24689 {
24690 /* Check we can apply the extension to this architecture. */
24691 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24692 {
24693 as_bad (_("extension does not apply to the base architecture"));
24694 return FALSE;
24695 }
24696
24697 /* Add or remove the extension. */
24698 if (adding_value)
24699 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24700 else
24701 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24702
24703 break;
24704 }
24705
24706 if (opt->name == NULL)
24707 {
24708 /* Did we fail to find an extension because it wasn't specified in
24709 alphabetical order, or because it does not exist? */
24710
24711 for (opt = arm_extensions; opt->name != NULL; opt++)
24712 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24713 break;
24714
24715 if (opt->name == NULL)
24716 as_bad (_("unknown architectural extension `%s'"), str);
24717 else
24718 as_bad (_("architectural extensions must be specified in "
24719 "alphabetical order"));
24720
24721 return FALSE;
24722 }
24723 else
24724 {
24725 /* We should skip the extension we've just matched the next time
24726 round. */
24727 opt++;
24728 }
24729
24730 str = ext;
24731 };
24732
24733 return TRUE;
24734 }
24735
24736 static bfd_boolean
24737 arm_parse_cpu (char *str)
24738 {
24739 const struct arm_cpu_option_table *opt;
24740 char *ext = strchr (str, '+');
24741 size_t len;
24742
24743 if (ext != NULL)
24744 len = ext - str;
24745 else
24746 len = strlen (str);
24747
24748 if (len == 0)
24749 {
24750 as_bad (_("missing cpu name `%s'"), str);
24751 return FALSE;
24752 }
24753
24754 for (opt = arm_cpus; opt->name != NULL; opt++)
24755 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24756 {
24757 mcpu_cpu_opt = &opt->value;
24758 mcpu_fpu_opt = &opt->default_fpu;
24759 if (opt->canonical_name)
24760 strcpy (selected_cpu_name, opt->canonical_name);
24761 else
24762 {
24763 size_t i;
24764
24765 for (i = 0; i < len; i++)
24766 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24767 selected_cpu_name[i] = 0;
24768 }
24769
24770 if (ext != NULL)
24771 return arm_parse_extension (ext, &mcpu_cpu_opt);
24772
24773 return TRUE;
24774 }
24775
24776 as_bad (_("unknown cpu `%s'"), str);
24777 return FALSE;
24778 }
24779
24780 static bfd_boolean
24781 arm_parse_arch (char *str)
24782 {
24783 const struct arm_arch_option_table *opt;
24784 char *ext = strchr (str, '+');
24785 size_t len;
24786
24787 if (ext != NULL)
24788 len = ext - str;
24789 else
24790 len = strlen (str);
24791
24792 if (len == 0)
24793 {
24794 as_bad (_("missing architecture name `%s'"), str);
24795 return FALSE;
24796 }
24797
24798 for (opt = arm_archs; opt->name != NULL; opt++)
24799 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24800 {
24801 march_cpu_opt = &opt->value;
24802 march_fpu_opt = &opt->default_fpu;
24803 strcpy (selected_cpu_name, opt->name);
24804
24805 if (ext != NULL)
24806 return arm_parse_extension (ext, &march_cpu_opt);
24807
24808 return TRUE;
24809 }
24810
24811 as_bad (_("unknown architecture `%s'\n"), str);
24812 return FALSE;
24813 }
24814
24815 static bfd_boolean
24816 arm_parse_fpu (char * str)
24817 {
24818 const struct arm_option_fpu_value_table * opt;
24819
24820 for (opt = arm_fpus; opt->name != NULL; opt++)
24821 if (streq (opt->name, str))
24822 {
24823 mfpu_opt = &opt->value;
24824 return TRUE;
24825 }
24826
24827 as_bad (_("unknown floating point format `%s'\n"), str);
24828 return FALSE;
24829 }
24830
24831 static bfd_boolean
24832 arm_parse_float_abi (char * str)
24833 {
24834 const struct arm_option_value_table * opt;
24835
24836 for (opt = arm_float_abis; opt->name != NULL; opt++)
24837 if (streq (opt->name, str))
24838 {
24839 mfloat_abi_opt = opt->value;
24840 return TRUE;
24841 }
24842
24843 as_bad (_("unknown floating point abi `%s'\n"), str);
24844 return FALSE;
24845 }
24846
24847 #ifdef OBJ_ELF
24848 static bfd_boolean
24849 arm_parse_eabi (char * str)
24850 {
24851 const struct arm_option_value_table *opt;
24852
24853 for (opt = arm_eabis; opt->name != NULL; opt++)
24854 if (streq (opt->name, str))
24855 {
24856 meabi_flags = opt->value;
24857 return TRUE;
24858 }
24859 as_bad (_("unknown EABI `%s'\n"), str);
24860 return FALSE;
24861 }
24862 #endif
24863
24864 static bfd_boolean
24865 arm_parse_it_mode (char * str)
24866 {
24867 bfd_boolean ret = TRUE;
24868
24869 if (streq ("arm", str))
24870 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24871 else if (streq ("thumb", str))
24872 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24873 else if (streq ("always", str))
24874 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24875 else if (streq ("never", str))
24876 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24877 else
24878 {
24879 as_bad (_("unknown implicit IT mode `%s', should be "\
24880 "arm, thumb, always, or never."), str);
24881 ret = FALSE;
24882 }
24883
24884 return ret;
24885 }
24886
24887 static bfd_boolean
24888 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24889 {
24890 codecomposer_syntax = TRUE;
24891 arm_comment_chars[0] = ';';
24892 arm_line_separator_chars[0] = 0;
24893 return TRUE;
24894 }
24895
/* Long command-line options understood by the ARM backend; scanned by
   md_parse_option, printed by md_show_usage.  Each entry's parser
   receives the text following the matched option prefix.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
24916
/* GAS interface: handle one command-line option.  C is the option
   character and ARG its argument (or NULL).  Checks the fixed cases
   first, then the ARM_OPTS, ARM_LEGACY_OPTS and ARM_LONG_OPTS tables
   in turn.  Returns non-zero if the option was recognized.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple on/off options: exact match on the full option text.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above, but the stored value is a
	 pointer to a feature set rather than a plain value.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25007
25008 void
25009 md_show_usage (FILE * fp)
25010 {
25011 struct arm_option_table *opt;
25012 struct arm_long_option_table *lopt;
25013
25014 fprintf (fp, _(" ARM-specific assembler options:\n"));
25015
25016 for (opt = arm_opts; opt->option != NULL; opt++)
25017 if (opt->help != NULL)
25018 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
25019
25020 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25021 if (lopt->help != NULL)
25022 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
25023
25024 #ifdef OPTION_EB
25025 fprintf (fp, _("\
25026 -EB assemble code for a big-endian cpu\n"));
25027 #endif
25028
25029 #ifdef OPTION_EL
25030 fprintf (fp, _("\
25031 -EL assemble code for a little-endian cpu\n"));
25032 #endif
25033
25034 fprintf (fp, _("\
25035 --fix-v4bx Allow BX in ARMv4 code\n"));
25036 }
25037
25038
25039 #ifdef OBJ_ELF
typedef struct
{
  int val;			/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Feature bits implied by that architecture.  */
} cpu_arch_ver_table;
25045
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  Note that the VAL column is therefore not
   monotonically increasing: ordering is by feature-set containment, not
   by tag value (e.g. v6K sorts before v6Z here).  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {0, ARM_ARCH_NONE}
};
25068
25069 /* Set an attribute if it has not already been set by the user. */
25070 static void
25071 aeabi_set_attribute_int (int tag, int value)
25072 {
25073 if (tag < 1
25074 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25075 || !attributes_set_explicitly[tag])
25076 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25077 }
25078
25079 static void
25080 aeabi_set_attribute_string (int tag, const char *value)
25081 {
25082 if (tag < 1
25083 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25084 || !attributes_set_explicitly[tag])
25085 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25086 }
25087
/* Set the public EABI object attributes.  Derives the architecture and
   feature-related build attributes (Tag_CPU_arch, Tag_VFP_arch, etc.)
   from the union of the features actually used and those selected on
   the command line, then records each one unless the user has set it
   explicitly.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk CPU_ARCH_VER (sorted least features first); the last entry
     whose features are all present wins, clearing its bits from TMP as
     we go so later entries only match on genuinely new features.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armv..." names are reported upper-cased, minus the "arm"
	 prefix (e.g. "armv7-a" -> "V7-A").  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
    aeabi_set_attribute_int (Tag_VFP_arch, 7);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8 we set the tag to 0 as integer divide is implied by the base
     architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25279
25280 /* Add the default contents for the .ARM.attributes section. */
25281 void
25282 arm_md_end (void)
25283 {
25284 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25285 return;
25286
25287 aeabi_set_public_attributes ();
25288 }
25289 #endif /* OBJ_ELF */
25290
25291
25292 /* Parse a .cpu directive. */
25293
25294 static void
25295 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25296 {
25297 const struct arm_cpu_option_table *opt;
25298 char *name;
25299 char saved_char;
25300
25301 name = input_line_pointer;
25302 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25303 input_line_pointer++;
25304 saved_char = *input_line_pointer;
25305 *input_line_pointer = 0;
25306
25307 /* Skip the first "all" entry. */
25308 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25309 if (streq (opt->name, name))
25310 {
25311 mcpu_cpu_opt = &opt->value;
25312 selected_cpu = opt->value;
25313 if (opt->canonical_name)
25314 strcpy (selected_cpu_name, opt->canonical_name);
25315 else
25316 {
25317 int i;
25318 for (i = 0; opt->name[i]; i++)
25319 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25320
25321 selected_cpu_name[i] = 0;
25322 }
25323 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25324 *input_line_pointer = saved_char;
25325 demand_empty_rest_of_line ();
25326 return;
25327 }
25328 as_bad (_("unknown cpu `%s'"), name);
25329 *input_line_pointer = saved_char;
25330 ignore_rest_of_line ();
25331 }
25332
25333
25334 /* Parse a .arch directive. */
25335
25336 static void
25337 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25338 {
25339 const struct arm_arch_option_table *opt;
25340 char saved_char;
25341 char *name;
25342
25343 name = input_line_pointer;
25344 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25345 input_line_pointer++;
25346 saved_char = *input_line_pointer;
25347 *input_line_pointer = 0;
25348
25349 /* Skip the first "all" entry. */
25350 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25351 if (streq (opt->name, name))
25352 {
25353 mcpu_cpu_opt = &opt->value;
25354 selected_cpu = opt->value;
25355 strcpy (selected_cpu_name, opt->name);
25356 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25357 *input_line_pointer = saved_char;
25358 demand_empty_rest_of_line ();
25359 return;
25360 }
25361
25362 as_bad (_("unknown architecture `%s'\n"), name);
25363 *input_line_pointer = saved_char;
25364 ignore_rest_of_line ();
25365 }
25366
25367
25368 /* Parse a .object_arch directive. */
25369
25370 static void
25371 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25372 {
25373 const struct arm_arch_option_table *opt;
25374 char saved_char;
25375 char *name;
25376
25377 name = input_line_pointer;
25378 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25379 input_line_pointer++;
25380 saved_char = *input_line_pointer;
25381 *input_line_pointer = 0;
25382
25383 /* Skip the first "all" entry. */
25384 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25385 if (streq (opt->name, name))
25386 {
25387 object_arch = &opt->value;
25388 *input_line_pointer = saved_char;
25389 demand_empty_rest_of_line ();
25390 return;
25391 }
25392
25393 as_bad (_("unknown architecture `%s'\n"), name);
25394 *input_line_pointer = saved_char;
25395 ignore_rest_of_line ();
25396 }
25397
25398 /* Parse a .arch_extension directive. */
25399
25400 static void
25401 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25402 {
25403 const struct arm_option_extension_value_table *opt;
25404 char saved_char;
25405 char *name;
25406 int adding_value = 1;
25407
25408 name = input_line_pointer;
25409 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25410 input_line_pointer++;
25411 saved_char = *input_line_pointer;
25412 *input_line_pointer = 0;
25413
25414 if (strlen (name) >= 2
25415 && strncmp (name, "no", 2) == 0)
25416 {
25417 adding_value = 0;
25418 name += 2;
25419 }
25420
25421 for (opt = arm_extensions; opt->name != NULL; opt++)
25422 if (streq (opt->name, name))
25423 {
25424 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25425 {
25426 as_bad (_("architectural extension `%s' is not allowed for the "
25427 "current base architecture"), name);
25428 break;
25429 }
25430
25431 if (adding_value)
25432 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
25433 else
25434 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
25435
25436 mcpu_cpu_opt = &selected_cpu;
25437 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25438 *input_line_pointer = saved_char;
25439 demand_empty_rest_of_line ();
25440 return;
25441 }
25442
25443 if (opt->name == NULL)
25444 as_bad (_("unknown architecture extension `%s'\n"), name);
25445
25446 *input_line_pointer = saved_char;
25447 ignore_rest_of_line ();
25448 }
25449
25450 /* Parse a .fpu directive. */
25451
25452 static void
25453 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25454 {
25455 const struct arm_option_fpu_value_table *opt;
25456 char saved_char;
25457 char *name;
25458
25459 name = input_line_pointer;
25460 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25461 input_line_pointer++;
25462 saved_char = *input_line_pointer;
25463 *input_line_pointer = 0;
25464
25465 for (opt = arm_fpus; opt->name != NULL; opt++)
25466 if (streq (opt->name, name))
25467 {
25468 mfpu_opt = &opt->value;
25469 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25470 *input_line_pointer = saved_char;
25471 demand_empty_rest_of_line ();
25472 return;
25473 }
25474
25475 as_bad (_("unknown floating point format `%s'\n"), name);
25476 *input_line_pointer = saved_char;
25477 ignore_rest_of_line ();
25478 }
25479
/* Copy symbol information.  Propagates the ARM-specific flag word from
   SRC to DEST via the ARM_GET_FLAG accessor.  NOTE(review): the flag
   presumably records per-symbol state such as the Thumb-function
   marking -- confirm against the ARM_* flag macros in tc-arm.h.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
25487
25488 #ifdef OBJ_ELF
25489 /* Given a symbolic attribute NAME, return the proper integer value.
25490 Returns -1 if the attribute is not known. */
25491
25492 int
25493 arm_convert_symbolic_attribute (const char *name)
25494 {
25495 static const struct
25496 {
25497 const char * name;
25498 const int tag;
25499 }
25500 attribute_table[] =
25501 {
25502 /* When you modify this table you should
25503 also modify the list in doc/c-arm.texi. */
25504 #define T(tag) {#tag, tag}
25505 T (Tag_CPU_raw_name),
25506 T (Tag_CPU_name),
25507 T (Tag_CPU_arch),
25508 T (Tag_CPU_arch_profile),
25509 T (Tag_ARM_ISA_use),
25510 T (Tag_THUMB_ISA_use),
25511 T (Tag_FP_arch),
25512 T (Tag_VFP_arch),
25513 T (Tag_WMMX_arch),
25514 T (Tag_Advanced_SIMD_arch),
25515 T (Tag_PCS_config),
25516 T (Tag_ABI_PCS_R9_use),
25517 T (Tag_ABI_PCS_RW_data),
25518 T (Tag_ABI_PCS_RO_data),
25519 T (Tag_ABI_PCS_GOT_use),
25520 T (Tag_ABI_PCS_wchar_t),
25521 T (Tag_ABI_FP_rounding),
25522 T (Tag_ABI_FP_denormal),
25523 T (Tag_ABI_FP_exceptions),
25524 T (Tag_ABI_FP_user_exceptions),
25525 T (Tag_ABI_FP_number_model),
25526 T (Tag_ABI_align_needed),
25527 T (Tag_ABI_align8_needed),
25528 T (Tag_ABI_align_preserved),
25529 T (Tag_ABI_align8_preserved),
25530 T (Tag_ABI_enum_size),
25531 T (Tag_ABI_HardFP_use),
25532 T (Tag_ABI_VFP_args),
25533 T (Tag_ABI_WMMX_args),
25534 T (Tag_ABI_optimization_goals),
25535 T (Tag_ABI_FP_optimization_goals),
25536 T (Tag_compatibility),
25537 T (Tag_CPU_unaligned_access),
25538 T (Tag_FP_HP_extension),
25539 T (Tag_VFP_HP_extension),
25540 T (Tag_ABI_FP_16bit_format),
25541 T (Tag_MPextension_use),
25542 T (Tag_DIV_use),
25543 T (Tag_nodefaults),
25544 T (Tag_also_compatible_with),
25545 T (Tag_conformance),
25546 T (Tag_T2EE_use),
25547 T (Tag_Virtualization_use),
25548 /* We deliberately do not include Tag_MPextension_use_legacy. */
25549 #undef T
25550 };
25551 unsigned int i;
25552
25553 if (name == NULL)
25554 return -1;
25555
25556 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25557 if (streq (name, attribute_table[i].name))
25558 return attribute_table[i].tag;
25559
25560 return -1;
25561 }
25562
25563
25564 /* Apply sym value for relocations only in the case that
25565 they are for local symbols and you have the respective
25566 architectural feature for blx and simple switches. */
25567 int
25568 arm_apply_sym_value (struct fix * fixP)
25569 {
25570 if (fixP->fx_addsy
25571 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25572 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25573 {
25574 switch (fixP->fx_r_type)
25575 {
25576 case BFD_RELOC_ARM_PCREL_BLX:
25577 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25578 if (ARM_IS_FUNC (fixP->fx_addsy))
25579 return 1;
25580 break;
25581
25582 case BFD_RELOC_ARM_PCREL_CALL:
25583 case BFD_RELOC_THUMB_PCREL_BLX:
25584 if (THUMB_IS_FUNC (fixP->fx_addsy))
25585 return 1;
25586 break;
25587
25588 default:
25589 break;
25590 }
25591
25592 }
25593 return 0;
25594 }
25595 #endif /* OBJ_ELF */
This page took 0.651901 seconds and 5 git commands to generate.