Convert more variables to a constant form.
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
/* Symbol marking the start of the function being unwound
   (presumably set when the unwind region is opened -- confirm
   against the .fnstart handler, which is outside this view).  */
51 symbolS * proc_start;
/* Symbol for this function's entry in the unwind table.  */
52 symbolS * table_entry;
/* Personality routine symbol, when one was named explicitly.
   NOTE(review): inferred from the name -- confirm at the use sites.  */
53 symbolS * personality_routine;
/* Index of a pre-defined personality routine.  NOTE(review): the
   sentinel used for "none selected" is not visible in this chunk.  */
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
/* Bytes of OPCODES currently in use, and bytes allocated.  */
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
212 static const arm_feature_set arm_ext_v6t2_v8m =
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics =
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
217 static const arm_feature_set arm_ext_v8_2 =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16 =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
222
223 static const arm_feature_set arm_arch_any = ARM_ANY;
224 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
226 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
227 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
228
229 static const arm_feature_set arm_cext_iwmmxt2 =
230 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
231 static const arm_feature_set arm_cext_iwmmxt =
232 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
233 static const arm_feature_set arm_cext_xscale =
234 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
235 static const arm_feature_set arm_cext_maverick =
236 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
237 static const arm_feature_set fpu_fpa_ext_v1 =
238 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
239 static const arm_feature_set fpu_fpa_ext_v2 =
240 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
241 static const arm_feature_set fpu_vfp_ext_v1xd =
242 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
243 static const arm_feature_set fpu_vfp_ext_v1 =
244 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
245 static const arm_feature_set fpu_vfp_ext_v2 =
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
247 static const arm_feature_set fpu_vfp_ext_v3xd =
248 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
249 static const arm_feature_set fpu_vfp_ext_v3 =
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
251 static const arm_feature_set fpu_vfp_ext_d32 =
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
253 static const arm_feature_set fpu_neon_ext_v1 =
254 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
255 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
257 static const arm_feature_set fpu_vfp_fp16 =
258 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
259 static const arm_feature_set fpu_neon_ext_fma =
260 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
261 static const arm_feature_set fpu_vfp_ext_fma =
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
263 static const arm_feature_set fpu_vfp_ext_armv8 =
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
265 static const arm_feature_set fpu_vfp_ext_armv8xd =
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
267 static const arm_feature_set fpu_neon_ext_armv8 =
268 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
269 static const arm_feature_set fpu_crypto_ext_armv8 =
270 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
271 static const arm_feature_set crc_ext_armv8 =
272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
273 static const arm_feature_set fpu_neon_ext_v8_1 =
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
275
276 static int mfloat_abi_opt = -1;
277 /* Record user cpu selection for object attributes. */
278 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
279 /* Must be long enough to hold any of the names in arm_cpus. */
280 static char selected_cpu_name[20];
281
282 extern FLONUM_TYPE generic_floating_point_number;
283
284 /* Return if no cpu was selected on command-line. */
285 static bfd_boolean
286 no_cpu_selected (void)
287 {
288 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
289 }
290
291 #ifdef OBJ_ELF
292 # ifdef EABI_DEFAULT
293 static int meabi_flags = EABI_DEFAULT;
294 # else
295 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
296 # endif
297
298 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
299
300 bfd_boolean
301 arm_is_eabi (void)
302 {
303 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
304 }
305 #endif
306
307 #ifdef OBJ_ELF
308 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
309 symbolS * GOT_symbol;
310 #endif
311
312 /* 0: assemble for ARM,
313 1: assemble for Thumb,
314 2: assemble for Thumb even though target CPU does not support thumb
315 instructions. */
316 static int thumb_mode = 0;
317 /* A value distinct from the possible values for thumb_mode that we
318 can use to record whether thumb_mode has been copied into the
319 tc_frag_data field of a frag. */
320 #define MODE_RECORDED (1 << 4)
321
322 /* Specifies the intrinsic IT insn behavior mode. */
323 enum implicit_it_mode
324 {
325 IMPLICIT_IT_MODE_NEVER = 0x00,
326 IMPLICIT_IT_MODE_ARM = 0x01,
327 IMPLICIT_IT_MODE_THUMB = 0x02,
328 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
329 };
330 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
331
332 /* If unified_syntax is true, we are processing the new unified
333 ARM/Thumb syntax. Important differences from the old ARM mode:
334
335 - Immediate operands do not require a # prefix.
336 - Conditional affixes always appear at the end of the
337 instruction. (For backward compatibility, those instructions
338 that formerly had them in the middle, continue to accept them
339 there.)
340 - The IT instruction may appear, and if it does is validated
341 against subsequent conditional affixes. It does not generate
342 machine code.
343
344 Important differences from the old Thumb mode:
345
346 - Immediate operands do not require a # prefix.
347 - Most of the V6T2 instructions are only available in unified mode.
348 - The .N and .W suffixes are recognized and honored (it is an error
349 if they cannot be honored).
350 - All instructions set the flags if and only if they have an 's' affix.
351 - Conditional affixes may be used. They are validated against
352 preceding IT instructions. Unlike ARM mode, you cannot use a
353 conditional affix except in the scope of an IT instruction. */
354
355 static bfd_boolean unified_syntax = FALSE;
356
357 /* An immediate operand can start with #, and ld*, st*, pld operands
358 can contain [ and ]. We need to tell APP not to elide whitespace
359 before a [, which can appear as the first operand for pld.
360 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
361 const char arm_symbol_chars[] = "#[]{}";
362
363 enum neon_el_type
364 {
365 NT_invtype,
366 NT_untyped,
367 NT_integer,
368 NT_float,
369 NT_poly,
370 NT_signed,
371 NT_unsigned
372 };
373
374 struct neon_type_el
375 {
376 enum neon_el_type type;
377 unsigned size;
378 };
379
380 #define NEON_MAX_TYPE_ELS 4
381
382 struct neon_type
383 {
384 struct neon_type_el el[NEON_MAX_TYPE_ELS];
385 unsigned elems;
386 };
387
388 enum it_instruction_type
389 {
390 OUTSIDE_IT_INSN,
391 INSIDE_IT_INSN,
392 INSIDE_IT_LAST_INSN,
393 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
394 if inside, should be the last one. */
395 NEUTRAL_IT_INSN, /* This could be either inside or outside,
396 i.e. BKPT and NOP. */
397 IT_INSN /* The IT insn has been parsed. */
398 };
399
400 /* The maximum number of operands we need. */
401 #define ARM_IT_MAX_OPERANDS 6
402
403 struct arm_it
404 {
405 const char * error;
406 unsigned long instruction;
407 int size;
408 int size_req;
409 int cond;
410 /* "uncond_value" is set to the value in place of the conditional field in
411 unconditional versions of the instruction, or -1 if nothing is
412 appropriate. */
413 int uncond_value;
414 struct neon_type vectype;
415 /* This does not indicate an actual NEON instruction, only that
416 the mnemonic accepts neon-style type suffixes. */
417 int is_neon;
418 /* Set to the opcode if the instruction needs relaxation.
419 Zero if the instruction is not relaxed. */
420 unsigned long relax;
421 struct
422 {
423 bfd_reloc_code_real_type type;
424 expressionS exp;
425 int pc_rel;
426 } reloc;
427
428 enum it_instruction_type it_insn_type;
429
430 struct
431 {
432 unsigned reg;
433 signed int imm;
434 struct neon_type_el vectype;
435 unsigned present : 1; /* Operand present. */
436 unsigned isreg : 1; /* Operand was a register. */
437 unsigned immisreg : 1; /* .imm field is a second register. */
438 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
439 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
440 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
441 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
442 instructions. This allows us to disambiguate ARM <-> vector insns. */
443 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
444 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
445 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
446 unsigned issingle : 1; /* Operand is VFP single-precision register. */
447 unsigned hasreloc : 1; /* Operand has relocation suffix. */
448 unsigned writeback : 1; /* Operand has trailing ! */
449 unsigned preind : 1; /* Preindexed address. */
450 unsigned postind : 1; /* Postindexed address. */
451 unsigned negative : 1; /* Index register was negated. */
452 unsigned shifted : 1; /* Shift applied to operation. */
453 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
454 } operands[ARM_IT_MAX_OPERANDS];
455 };
456
457 static struct arm_it inst;
458
459 #define NUM_FLOAT_VALS 8
460
461 const char * fp_const[] =
462 {
463 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
464 };
465
466 /* Number of littlenums required to hold an extended precision number. */
467 #define MAX_LITTLENUMS 6
468
469 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
470
471 #define FAIL (-1)
472 #define SUCCESS (0)
473
474 #define SUFF_S 1
475 #define SUFF_D 2
476 #define SUFF_E 3
477 #define SUFF_P 4
478
479 #define CP_T_X 0x00008000
480 #define CP_T_Y 0x00400000
481
482 #define CONDS_BIT 0x00100000
483 #define LOAD_BIT 0x00100000
484
485 #define DOUBLE_LOAD_FLAG 0x00000001
486
487 struct asm_cond
488 {
489 const char * template_name;
490 unsigned long value;
491 };
492
493 #define COND_ALWAYS 0xE
494
495 struct asm_psr
496 {
497 const char * template_name;
498 unsigned long field;
499 };
500
501 struct asm_barrier_opt
502 {
503 const char * template_name;
504 unsigned long value;
505 const arm_feature_set arch;
506 };
507
508 /* The bit that distinguishes CPSR and SPSR. */
509 #define SPSR_BIT (1 << 22)
510
511 /* The individual PSR flag bits. */
512 #define PSR_c (1 << 16)
513 #define PSR_x (1 << 17)
514 #define PSR_s (1 << 18)
515 #define PSR_f (1 << 19)
516
517 struct reloc_entry
518 {
519 const char * name;
520 bfd_reloc_code_real_type reloc;
521 };
522
523 enum vfp_reg_pos
524 {
525 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
526 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
527 };
528
529 enum vfp_ldstm_type
530 {
531 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
532 };
533
534 /* Bits for DEFINED field in neon_typed_alias. */
535 #define NTA_HASTYPE 1
536 #define NTA_HASINDEX 2
537
538 struct neon_typed_alias
539 {
540 unsigned char defined;
541 unsigned char index;
542 struct neon_type_el eltype;
543 };
544
545 /* ARM register categories. This includes coprocessor numbers and various
546 architecture extensions' registers. */
547 enum arm_reg_type
548 {
549 REG_TYPE_RN,
550 REG_TYPE_CP,
551 REG_TYPE_CN,
552 REG_TYPE_FN,
553 REG_TYPE_VFS,
554 REG_TYPE_VFD,
555 REG_TYPE_NQ,
556 REG_TYPE_VFSD,
557 REG_TYPE_NDQ,
558 REG_TYPE_NSDQ,
559 REG_TYPE_VFC,
560 REG_TYPE_MVF,
561 REG_TYPE_MVD,
562 REG_TYPE_MVFX,
563 REG_TYPE_MVDX,
564 REG_TYPE_MVAX,
565 REG_TYPE_DSPSC,
566 REG_TYPE_MMXWR,
567 REG_TYPE_MMXWC,
568 REG_TYPE_MMXWCG,
569 REG_TYPE_XSCALE,
570 REG_TYPE_RNB
571 };
572
573 /* Structure for a hash table entry for a register.
574 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
575 information which states whether a vector type or index is specified (for a
576 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
577 struct reg_entry
578 {
579 const char * name;
580 unsigned int number;
581 unsigned char type;
582 unsigned char builtin;
583 struct neon_typed_alias * neon;
584 };
585
586 /* Diagnostics used when we don't get a register of the expected type. */
587 const char * const reg_expected_msgs[] =
588 {
589 N_("ARM register expected"),
590 N_("bad or missing co-processor number"),
591 N_("co-processor register expected"),
592 N_("FPA register expected"),
593 N_("VFP single precision register expected"),
594 N_("VFP/Neon double precision register expected"),
595 N_("Neon quad precision register expected"),
596 N_("VFP single or double precision register expected"),
597 N_("Neon double or quad precision register expected"),
598 N_("VFP single, double or Neon quad precision register expected"),
599 N_("VFP system register expected"),
600 N_("Maverick MVF register expected"),
601 N_("Maverick MVD register expected"),
602 N_("Maverick MVFX register expected"),
603 N_("Maverick MVDX register expected"),
604 N_("Maverick MVAX register expected"),
605 N_("Maverick DSPSC register expected"),
606 N_("iWMMXt data register expected"),
607 N_("iWMMXt control register expected"),
608 N_("iWMMXt scalar register expected"),
609 N_("XScale accumulator register expected"),
610 };
611
612 /* Some well known registers that we refer to directly elsewhere. */
613 #define REG_R12 12
614 #define REG_SP 13
615 #define REG_LR 14
616 #define REG_PC 15
617
618 /* ARM instructions take 4bytes in the object file, Thumb instructions
619 take 2: */
620 #define INSN_SIZE 4
621
622 struct asm_opcode
623 {
624 /* Basic string to match. */
625 const char * template_name;
626
627 /* Parameters to instruction. */
628 unsigned int operands[8];
629
630 /* Conditional tag - see opcode_lookup. */
631 unsigned int tag : 4;
632
633 /* Basic instruction code. */
634 unsigned int avalue : 28;
635
636 /* Thumb-format instruction code. */
637 unsigned int tvalue;
638
639 /* Which architecture variant provides this instruction. */
640 const arm_feature_set * avariant;
641 const arm_feature_set * tvariant;
642
643 /* Function to call to encode instruction in ARM format. */
644 void (* aencode) (void);
645
646 /* Function to call to encode instruction in Thumb format. */
647 void (* tencode) (void);
648 };
649
650 /* Defines for various bits that we will want to toggle. */
651 #define INST_IMMEDIATE 0x02000000
652 #define OFFSET_REG 0x02000000
653 #define HWOFFSET_IMM 0x00400000
654 #define SHIFT_BY_REG 0x00000010
655 #define PRE_INDEX 0x01000000
656 #define INDEX_UP 0x00800000
657 #define WRITE_BACK 0x00200000
658 #define LDM_TYPE_2_OR_3 0x00400000
659 #define CPSI_MMOD 0x00020000
660
661 #define LITERAL_MASK 0xf000f000
662 #define OPCODE_MASK 0xfe1fffff
663 #define V4_STR_BIT 0x00000020
664 #define VLDR_VMOV_SAME 0x0040f000
665
666 #define T2_SUBS_PC_LR 0xf3de8f00
667
668 #define DATA_OP_SHIFT 21
669
670 #define T2_OPCODE_MASK 0xfe1fffff
671 #define T2_DATA_OP_SHIFT 21
672
673 #define A_COND_MASK 0xf0000000
674 #define A_PUSH_POP_OP_MASK 0x0fff0000
675
676 /* Opcodes for pushing/popping registers to/from the stack. */
677 #define A1_OPCODE_PUSH 0x092d0000
678 #define A2_OPCODE_PUSH 0x052d0004
679 #define A2_OPCODE_POP 0x049d0004
680
681 /* Codes to distinguish the arithmetic instructions. */
682 #define OPCODE_AND 0
683 #define OPCODE_EOR 1
684 #define OPCODE_SUB 2
685 #define OPCODE_RSB 3
686 #define OPCODE_ADD 4
687 #define OPCODE_ADC 5
688 #define OPCODE_SBC 6
689 #define OPCODE_RSC 7
690 #define OPCODE_TST 8
691 #define OPCODE_TEQ 9
692 #define OPCODE_CMP 10
693 #define OPCODE_CMN 11
694 #define OPCODE_ORR 12
695 #define OPCODE_MOV 13
696 #define OPCODE_BIC 14
697 #define OPCODE_MVN 15
698
699 #define T2_OPCODE_AND 0
700 #define T2_OPCODE_BIC 1
701 #define T2_OPCODE_ORR 2
702 #define T2_OPCODE_ORN 3
703 #define T2_OPCODE_EOR 4
704 #define T2_OPCODE_ADD 8
705 #define T2_OPCODE_ADC 10
706 #define T2_OPCODE_SBC 11
707 #define T2_OPCODE_SUB 13
708 #define T2_OPCODE_RSB 14
709
710 #define T_OPCODE_MUL 0x4340
711 #define T_OPCODE_TST 0x4200
712 #define T_OPCODE_CMN 0x42c0
713 #define T_OPCODE_NEG 0x4240
714 #define T_OPCODE_MVN 0x43c0
715
716 #define T_OPCODE_ADD_R3 0x1800
717 #define T_OPCODE_SUB_R3 0x1a00
718 #define T_OPCODE_ADD_HI 0x4400
719 #define T_OPCODE_ADD_ST 0xb000
720 #define T_OPCODE_SUB_ST 0xb080
721 #define T_OPCODE_ADD_SP 0xa800
722 #define T_OPCODE_ADD_PC 0xa000
723 #define T_OPCODE_ADD_I8 0x3000
724 #define T_OPCODE_SUB_I8 0x3800
725 #define T_OPCODE_ADD_I3 0x1c00
726 #define T_OPCODE_SUB_I3 0x1e00
727
728 #define T_OPCODE_ASR_R 0x4100
729 #define T_OPCODE_LSL_R 0x4080
730 #define T_OPCODE_LSR_R 0x40c0
731 #define T_OPCODE_ROR_R 0x41c0
732 #define T_OPCODE_ASR_I 0x1000
733 #define T_OPCODE_LSL_I 0x0000
734 #define T_OPCODE_LSR_I 0x0800
735
736 #define T_OPCODE_MOV_I8 0x2000
737 #define T_OPCODE_CMP_I8 0x2800
738 #define T_OPCODE_CMP_LR 0x4280
739 #define T_OPCODE_MOV_HR 0x4600
740 #define T_OPCODE_CMP_HR 0x4500
741
742 #define T_OPCODE_LDR_PC 0x4800
743 #define T_OPCODE_LDR_SP 0x9800
744 #define T_OPCODE_STR_SP 0x9000
745 #define T_OPCODE_LDR_IW 0x6800
746 #define T_OPCODE_STR_IW 0x6000
747 #define T_OPCODE_LDR_IH 0x8800
748 #define T_OPCODE_STR_IH 0x8000
749 #define T_OPCODE_LDR_IB 0x7800
750 #define T_OPCODE_STR_IB 0x7000
751 #define T_OPCODE_LDR_RW 0x5800
752 #define T_OPCODE_STR_RW 0x5000
753 #define T_OPCODE_LDR_RH 0x5a00
754 #define T_OPCODE_STR_RH 0x5200
755 #define T_OPCODE_LDR_RB 0x5c00
756 #define T_OPCODE_STR_RB 0x5400
757
758 #define T_OPCODE_PUSH 0xb400
759 #define T_OPCODE_POP 0xbc00
760
761 #define T_OPCODE_BRANCH 0xe000
762
763 #define THUMB_SIZE 2 /* Size of thumb instruction. */
764 #define THUMB_PP_PC_LR 0x0100
765 #define THUMB_LOAD_BIT 0x0800
766 #define THUMB2_LOAD_BIT 0x00100000
767
/* Diagnostic messages used when rejecting malformed instructions.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fix: the expansion previously ended with a stray ';', which silently
   injected an extra statement terminator and would break any use of
   BAD_ADDR_MODE in expression context (ternary, function argument).  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
790
791 static struct hash_control * arm_ops_hsh;
792 static struct hash_control * arm_cond_hsh;
793 static struct hash_control * arm_shift_hsh;
794 static struct hash_control * arm_psr_hsh;
795 static struct hash_control * arm_v7m_psr_hsh;
796 static struct hash_control * arm_reg_hsh;
797 static struct hash_control * arm_reloc_hsh;
798 static struct hash_control * arm_barrier_opt_hsh;
799
800 /* Stuff needed to resolve the label ambiguity
801 As:
802 ...
803 label: <insn>
804 may differ from:
805 ...
806 label:
807 <insn> */
808
809 symbolS * last_label_seen;
810 static int label_is_thumb_function_name = FALSE;
811
812 /* Literal pool structure. Held on a per-section
813 and per-sub-section basis. */
814
815 #define MAX_LITERAL_POOL_SIZE 1024
816 typedef struct literal_pool
817 {
818 expressionS literals [MAX_LITERAL_POOL_SIZE];
819 unsigned int next_free_entry;
820 unsigned int id;
821 symbolS * symbol;
822 segT section;
823 subsegT sub_section;
824 #ifdef OBJ_ELF
825 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
826 #endif
827 struct literal_pool * next;
828 unsigned int alignment;
829 } literal_pool;
830
831 /* Pointer to a linked list of literal pools. */
832 literal_pool * list_of_pools = NULL;
833
834 typedef enum asmfunc_states
835 {
836 OUTSIDE_ASMFUNC,
837 WAITING_ASMFUNC_NAME,
838 WAITING_ENDASMFUNC
839 } asmfunc_states;
840
841 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
842
843 #ifdef OBJ_ELF
844 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
845 #else
846 static struct current_it now_it;
847 #endif
848
849 static inline int
850 now_it_compatible (int cond)
851 {
852 return (cond & ~1) == (now_it.cc & ~1);
853 }
854
855 static inline int
856 conditional_insn (void)
857 {
858 return inst.cond != COND_ALWAYS;
859 }
860
861 static int in_it_block (void);
862
863 static int handle_it_state (void);
864
865 static void force_automatic_it_block_close (void);
866
867 static void it_fsm_post_encode (void);
868
869 #define set_it_insn_type(type) \
870 do \
871 { \
872 inst.it_insn_type = type; \
873 if (handle_it_state () == FAIL) \
874 return; \
875 } \
876 while (0)
877
878 #define set_it_insn_type_nonvoid(type, failret) \
879 do \
880 { \
881 inst.it_insn_type = type; \
882 if (handle_it_state () == FAIL) \
883 return failret; \
884 } \
885 while(0)
886
887 #define set_it_insn_type_last() \
888 do \
889 { \
890 if (inst.cond == COND_ALWAYS) \
891 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
892 else \
893 set_it_insn_type (INSIDE_IT_LAST_INSN); \
894 } \
895 while (0)
896
897 /* Pure syntax. */
898
899 /* This array holds the chars that always start a comment. If the
900 pre-processor is disabled, these aren't very useful. */
901 char arm_comment_chars[] = "@";
902
903 /* This array holds the chars that only start a comment at the beginning of
904 a line. If the line seems to have the form '# 123 filename'
905 .line and .file directives will appear in the pre-processed output. */
906 /* Note that input_file.c hand checks for '#' at the beginning of the
907 first line of the input file. This is because the compiler outputs
908 #NO_APP at the beginning of its output. */
909 /* Also note that comments like this one will always work. */
910 const char line_comment_chars[] = "#";
911
912 char arm_line_separator_chars[] = ";";
913
914 /* Chars that can be used to separate mant
915 from exp in floating point numbers. */
916 const char EXP_CHARS[] = "eE";
917
918 /* Chars that mean this number is a floating point constant. */
919 /* As in 0f12.456 */
920 /* or 0d1.2345e12 */
921
922 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
923
924 /* Prefix characters that indicate the start of an immediate
925 value. */
926 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
927
928 /* Separator character handling. */
929
930 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
931
932 static inline int
933 skip_past_char (char ** str, char c)
934 {
935 /* PR gas/14987: Allow for whitespace before the expected character. */
936 skip_whitespace (*str);
937
938 if (**str == c)
939 {
940 (*str)++;
941 return SUCCESS;
942 }
943 else
944 return FAIL;
945 }
946
947 #define skip_past_comma(str) skip_past_char (str, ',')
948
949 /* Arithmetic expressions (possibly involving symbols). */
950
951 /* Return TRUE if anything in the expression is a bignum. */
952
953 static int
954 walk_no_bignums (symbolS * sp)
955 {
956 if (symbol_get_value_expression (sp)->X_op == O_big)
957 return 1;
958
959 if (symbol_get_value_expression (sp)->X_add_symbol)
960 {
961 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
962 || (symbol_get_value_expression (sp)->X_op_symbol
963 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
964 }
965
966 return 0;
967 }
968
/* Non-zero while expression () is running on behalf of
   my_get_expression (); tested by md_operand () below so that operands
   the generic parser cannot handle are marked O_illegal.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1		/* A '#' or '$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* The prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
978
/* Parse an arithmetic expression at *STR into EP, applying the
   immediate-prefix policy PREFIX_MODE (one of the GE_* values above).
   Advances *STR past the consumed text.  Returns 0 on success; on
   failure sets inst.error (if not already set) and returns non-zero.
   NOTE(review): the failure paths return both FAIL and the literal 1 —
   callers presumably test only for non-zero, but confirm before
   relying on the exact value.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily point it
     at our string; in_my_get_expression tells md_operand () that we
     are the caller.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1064
1065 /* Turn a string in input_line_pointer into a floating point constant
1066 of type TYPE, and store the appropriate bytes in *LITP. The number
1067 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1068 returned, or NULL on OK.
1069
   Note that fp constants aren't represented in the normal way on the ARM.
1071 In big endian mode, things are as expected. However, in little endian
1072 mode fp constants are big-endian word-wise, and little-endian byte-wise
1073 within the words. For example, (double) 1.1 in big endian mode is
1074 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1075 the byte sequence 99 99 f1 3f 9a 99 99 99.
1076
1077 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1078
1079 char *
1080 md_atof (int type, char * litP, int * sizeP)
1081 {
1082 int prec;
1083 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1084 char *t;
1085 int i;
1086
1087 switch (type)
1088 {
1089 case 'f':
1090 case 'F':
1091 case 's':
1092 case 'S':
1093 prec = 2;
1094 break;
1095
1096 case 'd':
1097 case 'D':
1098 case 'r':
1099 case 'R':
1100 prec = 4;
1101 break;
1102
1103 case 'x':
1104 case 'X':
1105 prec = 5;
1106 break;
1107
1108 case 'p':
1109 case 'P':
1110 prec = 5;
1111 break;
1112
1113 default:
1114 *sizeP = 0;
1115 return _("Unrecognized or unsupported floating point constant");
1116 }
1117
1118 t = atof_ieee (input_line_pointer, type, words);
1119 if (t)
1120 input_line_pointer = t;
1121 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1122
1123 if (target_big_endian)
1124 {
1125 for (i = 0; i < prec; i++)
1126 {
1127 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1128 litP += sizeof (LITTLENUM_TYPE);
1129 }
1130 }
1131 else
1132 {
1133 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1134 for (i = prec - 1; i >= 0; i--)
1135 {
1136 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1137 litP += sizeof (LITTLENUM_TYPE);
1138 }
1139 else
1140 /* For a 4 byte float the order of elements in `words' is 1 0.
1141 For an 8 byte float the order is 1 0 3 2. */
1142 for (i = 0; i < prec; i += 2)
1143 {
1144 md_number_to_chars (litP, (valueT) words[i + 1],
1145 sizeof (LITTLENUM_TYPE));
1146 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1147 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1148 litP += 2 * sizeof (LITTLENUM_TYPE);
1149 }
1150 }
1151
1152 return NULL;
1153 }
1154
1155 /* We handle all bad expressions here, so that we can report the faulty
1156 instruction in the error message. */
1157 void
1158 md_operand (expressionS * exp)
1159 {
1160 if (in_my_get_expression)
1161 exp->X_op = O_illegal;
1162 }
1163
1164 /* Immediate values. */
1165
1166 /* Generic immediate-value read function for use in directives.
1167 Accepts anything that 'expression' can fold to a constant.
1168 *val receives the number. */
1169 #ifdef OBJ_ELF
1170 static int
1171 immediate_for_directive (int *val)
1172 {
1173 expressionS exp;
1174 exp.X_op = O_illegal;
1175
1176 if (is_immediate_prefix (*input_line_pointer))
1177 {
1178 input_line_pointer++;
1179 expression (&exp);
1180 }
1181
1182 if (exp.X_op != O_constant)
1183 {
1184 as_bad (_("expected #constant"));
1185 ignore_rest_of_line ();
1186 return FAIL;
1187 }
1188 *val = exp.X_add_number;
1189 return SUCCESS;
1190 }
1191 #endif
1192
1193 /* Register parsing. */
1194
1195 /* Generic register parser. CCP points to what should be the
1196 beginning of a register name. If it is indeed a valid register
1197 name, advance CCP over it and return the reg_entry structure;
1198 otherwise return NULL. Does not issue diagnostics. */
1199
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  /* Allow one leading space (see skip_whitespace above).  */
  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* A mandatory register prefix must be present.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  /* An optional prefix is simply skipped when present.  */
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* A register name must start with a letter.  NOTE(review): ISALPHA
     would normally imply is_name_beginner — confirm the second test is
     needed before simplifying.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan the rest of the candidate name: letters, digits and
     underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input on success.  */
  *ccp = p;
  return reg;
}
1235
/* Accept alternative spellings for a few register classes: generic
   coprocessor register names for the MVF/MVD/MVFX/MVDX classes, a bare
   number for REG_TYPE_CP, and WCG names where a WC register is wanted.
   START is the unconsumed input, REG the entry found by
   arm_reg_parse_multi (may be NULL).  Returns a register number or
   FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  NOTE(review): there is no break here, so an
	 unmatched REG_TYPE_CP operand also tries the WCG test below —
	 confirm this fallthrough is intentional.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1273
1274 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1275 return value is the register number or FAIL. */
1276
1277 static int
1278 arm_reg_parse (char **ccp, enum arm_reg_type type)
1279 {
1280 char *start = *ccp;
1281 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1282 int ret;
1283
1284 /* Do not allow a scalar (reg+index) to parse as a register. */
1285 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1286 return FAIL;
1287
1288 if (reg && reg->type == type)
1289 return reg->number;
1290
1291 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1292 return ret;
1293
1294 *ccp = start;
1295 return FAIL;
1296 }
1297
1298 /* Parse a Neon type specifier. *STR should point at the leading '.'
1299 character. Does no verification at this stage that the type fits the opcode
1300 properly. E.g.,
1301
1302 .i32.i32.s16
1303 .s32.f32
1304 .u16
1305
1306 Can all be legally parsed by this function.
1307
1308 Fills in neon_type struct pointer with parsed information, and updates STR
1309 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1310 type, FAIL if not. */
1311
1312 static int
1313 parse_neon_type (struct neon_type *type, char **str)
1314 {
1315 char *ptr = *str;
1316
1317 if (type)
1318 type->elems = 0;
1319
1320 while (type->elems < NEON_MAX_TYPE_ELS)
1321 {
1322 enum neon_el_type thistype = NT_untyped;
1323 unsigned thissize = -1u;
1324
1325 if (*ptr != '.')
1326 break;
1327
1328 ptr++;
1329
1330 /* Just a size without an explicit type. */
1331 if (ISDIGIT (*ptr))
1332 goto parsesize;
1333
1334 switch (TOLOWER (*ptr))
1335 {
1336 case 'i': thistype = NT_integer; break;
1337 case 'f': thistype = NT_float; break;
1338 case 'p': thistype = NT_poly; break;
1339 case 's': thistype = NT_signed; break;
1340 case 'u': thistype = NT_unsigned; break;
1341 case 'd':
1342 thistype = NT_float;
1343 thissize = 64;
1344 ptr++;
1345 goto done;
1346 default:
1347 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1348 return FAIL;
1349 }
1350
1351 ptr++;
1352
1353 /* .f is an abbreviation for .f32. */
1354 if (thistype == NT_float && !ISDIGIT (*ptr))
1355 thissize = 32;
1356 else
1357 {
1358 parsesize:
1359 thissize = strtoul (ptr, &ptr, 10);
1360
1361 if (thissize != 8 && thissize != 16 && thissize != 32
1362 && thissize != 64)
1363 {
1364 as_bad (_("bad size %d in type specifier"), thissize);
1365 return FAIL;
1366 }
1367 }
1368
1369 done:
1370 if (type)
1371 {
1372 type->el[type->elems].type = thistype;
1373 type->el[type->elems].size = thissize;
1374 type->elems++;
1375 }
1376 }
1377
1378 /* Empty/missing type is not a successful parse. */
1379 if (type->elems == 0)
1380 return FAIL;
1381
1382 *str = ptr;
1383
1384 return SUCCESS;
1385 }
1386
1387 /* Errors may be set multiple times during parsing or bit encoding
1388 (particularly in the Neon bits), but usually the earliest error which is set
1389 will be the most meaningful. Avoid overwriting it with later (cascading)
1390 errors by calling this function. */
1391
1392 static void
1393 first_error (const char *err)
1394 {
1395 if (!inst.error)
1396 inst.error = err;
1397 }
1398
1399 /* Parse a single type, e.g. ".s32", leading period included. */
1400 static int
1401 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1402 {
1403 char *str = *ccp;
1404 struct neon_type optype;
1405
1406 if (*str == '.')
1407 {
1408 if (parse_neon_type (&optype, &str) == SUCCESS)
1409 {
1410 if (optype.elems == 1)
1411 *vectype = optype.el[0];
1412 else
1413 {
1414 first_error (_("only one type should be specified for operand"));
1415 return FAIL;
1416 }
1417 }
1418 else
1419 {
1420 first_error (_("vector type expected"));
1421 return FAIL;
1422 }
1423 }
1424 else
1425 return FAIL;
1426
1427 *ccp = str;
1428
1429 return SUCCESS;
1430 }
1431
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES 15	/* "[]": operate on every lane.  */
#define NEON_INTERLEAVE_LANES 14 /* Structure list without a lane index.  */
1437
1438 /* Parse either a register or a scalar, with an optional type. Return the
1439 register number, and optionally fill in the actual type of the register
1440 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1441 type/index information in *TYPEINFO. */
1442
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start from "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* An alias created with a type/index (.dn/.qn) inherits it here.  */
  if (reg->neon)
    atype = *reg->neon;

  /* Parse an optional ".type" suffix, e.g. "d0.s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Parse an optional scalar index: "[]" or "[<constant>]".  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1546
/* Like arm_reg_parse, but allow the following extra features:
1548 - If RTYPE is non-zero, return the (possibly restricted) type of the
1549 register (e.g. Neon double or quad reg when either has been requested).
1550 - If this is a Neon vector type with additional type information, fill
1551 in the struct pointed to by VECTYPE (if non-NULL).
1552 This function will fault on encountering a scalar. */
1553
1554 static int
1555 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1556 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1557 {
1558 struct neon_typed_alias atype;
1559 char *str = *ccp;
1560 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1561
1562 if (reg == FAIL)
1563 return FAIL;
1564
1565 /* Do not allow regname(... to parse as a register. */
1566 if (*str == '(')
1567 return FAIL;
1568
1569 /* Do not allow a scalar (reg+index) to parse as a register. */
1570 if ((atype.defined & NTA_HASINDEX) != 0)
1571 {
1572 first_error (_("register operand expected, but got scalar"));
1573 return FAIL;
1574 }
1575
1576 if (vectype)
1577 *vectype = atype.eltype;
1578
1579 *ccp = str;
1580
1581 return reg;
1582 }
1583
/* Decode the value returned by parse_scalar below: the D register
   number times 16 plus the lane index.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1586
1587 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1588 have enough information to be able to do a good job bounds-checking. So, we
1589 just do easy checks here, and do further checks later. */
1590
1591 static int
1592 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1593 {
1594 int reg;
1595 char *str = *ccp;
1596 struct neon_typed_alias atype;
1597
1598 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1599
1600 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1601 return FAIL;
1602
1603 if (atype.index == NEON_ALL_LANES)
1604 {
1605 first_error (_("scalar must have an index"));
1606 return FAIL;
1607 }
1608 else if (atype.index >= 64 / elsize)
1609 {
1610 first_error (_("scalar index out of range"));
1611 return FAIL;
1612 }
1613
1614 if (type)
1615 *type = atype.eltype;
1616
1617 *ccp = str;
1618
1619 return reg * 16 + atype.index;
1620 }
1621
1622 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1623
static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long range = 0;	/* Accumulated bitmask of registers.  */
  int another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      /* Close an "rN-rM" range: add the registers strictly
		 between the two endpoints.  */
	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* Continue after a ',', or after a '-' (which opens a range).
	     The second clause always sets in_range and advances str, so
	     the str-- below undoes the extra advance on loop exit.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      /* A bare constant is treated as a 16-bit register mask.  */
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  /* NOTE(review): the line above computes
		     (1 << lowest-set-bit-VALUE) - 1, not the bit's
		     index, so the register number printed below looks
		     bogus for any duplicate above r0 — confirm and fix
		     separately.  */
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      /* A symbolic mask: defer it as a relocation.  Only one
		 complex expression is allowed per instruction.  */
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1749
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision (S) registers.  */
  REGLIST_VFP_D,	/* Double-precision (D) registers.  */
  REGLIST_NEON_D	/* D registers with the Neon syntax extensions.  */
};
1758
1759 /* Parse a VFP register list. If the string is invalid return FAIL.
1760 Otherwise return the number of registers, and set PBASE to the first
1761 register. Parses registers of type ETYPE.
1762 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1763 - Q registers can be used to specify pairs of D registers
1764 - { } can be omitted from around a singleton register list
1765 FIXME: This is not implemented, as it would require backtracking in
1766 some cases, e.g.:
1767 vtbl.8 d3,d4,d5
1768 This could be done (the meaning isn't really ambiguous), but doesn't
1769 fit in well with the current parsing framework.
1770 - 32 D registers may be used (also true for VFPv3).
1771 FIXME: Types are ignored in these register lists, which is probably a
1772 bug. */
1773
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;	/* Number of D (or S) registers accumulated.  */
  int warned = 0;	/* Only warn once about ordering.  */
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; the limit for D registers depends on
     the FPU and is set further below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Sentinel: base_reg tracks the lowest register number seen.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;	/* A Q register covers two D slots.  */
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Fold every register of the range into the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the closing '}' is assumed rather than verified
     here — confirm a malformed terminator cannot reach this point
     silently.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1937
1938 /* True if two alias types are the same. */
1939
1940 static bfd_boolean
1941 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1942 {
1943 if (!a && !b)
1944 return TRUE;
1945
1946 if (!a || !b)
1947 return FALSE;
1948
1949 if (a->defined != b->defined)
1950 return FALSE;
1951
1952 if ((a->defined & NTA_HASTYPE) != 0
1953 && (a->eltype.type != b->eltype.type
1954 || a->eltype.size != b->eltype.size))
1955 return FALSE;
1956
1957 if ((a->defined & NTA_HASINDEX) != 0
1958 && (a->index != b->index))
1959 return FALSE;
1960
1961 return TRUE;
1962 }
1963
1964 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1965 The base register is put in *PBASE.
1966 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1967 the return value.
1968 The register stride (minus one) is put in bit 4 of the return value.
1969 Bits [6:5] encode the list length (minus one).
1970 The type of the list elements is put in *ELTYPE, if non-NULL. */
1971
1972 #define NEON_LANE(X) ((X) & 0xf)
1973 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1974 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1975
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First D register in the list.  */
  int reg_incr = -1;	/* Register stride; -1 until determined.  */
  int count = 0;	/* Number of D registers in the list.  */
  int lane = -1;	/* Lane index or a NEON_*_LANES value.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* NOTE(review): these strings are translated here AND passed through
     _() again at each first_error call below — redundant; confirm
     before cleaning up.  */
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember it and its type info.  A Q
	     register fixes the stride at 1 immediately.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the first sets the
	     stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* All list elements must carry the same type/index info.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register the range covers.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      /* An explicit lane index must match any earlier one.  */
      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list never fixed the stride; default it to 1.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Encode: lane in [3:0], stride-1 in bit 4, length-1 in [6:5].  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2128
2129 /* Parse an explicit relocation suffix on an expression. This is
2130 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2131 arm_reloc_hsh contains no entries, so this function can only
2132 succeed if there is no () after the word. Returns -1 on error,
2133 BFD_RELOC_UNUSED if there wasn't any suffix. */
2134
2135 static int
2136 parse_reloc (char **str)
2137 {
2138 struct reloc_entry *r;
2139 char *p, *q;
2140
2141 if (**str != '(')
2142 return BFD_RELOC_UNUSED;
2143
2144 p = *str + 1;
2145 q = p;
2146
2147 while (*q && *q != ')' && *q != ',')
2148 q++;
2149 if (*q != ')')
2150 return -1;
2151
2152 if ((r = (struct reloc_entry *)
2153 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2154 return -1;
2155
2156 *str = q + 1;
2157 return r->reloc;
2158 }
2159
2160 /* Directives: register aliases. */
2161
2162 static struct reg_entry *
2163 insert_reg_alias (char *str, unsigned number, int type)
2164 {
2165 struct reg_entry *new_reg;
2166 const char *name;
2167
2168 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2169 {
2170 if (new_reg->builtin)
2171 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2172
2173 /* Only warn about a redefinition if it's not defined as the
2174 same register. */
2175 else if (new_reg->number != number || new_reg->type != type)
2176 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2177
2178 return NULL;
2179 }
2180
2181 name = xstrdup (str);
2182 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2183
2184 new_reg->name = name;
2185 new_reg->number = number;
2186 new_reg->type = type;
2187 new_reg->builtin = FALSE;
2188 new_reg->neon = NULL;
2189
2190 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2191 abort ();
2192
2193 return new_reg;
2194 }
2195
2196 static void
2197 insert_neon_reg_alias (char *str, int number, int type,
2198 struct neon_typed_alias *atype)
2199 {
2200 struct reg_entry *reg = insert_reg_alias (str, number, type);
2201
2202 if (!reg)
2203 {
2204 first_error (_("attempt to redefine typed alias"));
2205 return;
2206 }
2207
2208 if (atype)
2209 {
2210 reg->neon = (struct neon_typed_alias *)
2211 xmalloc (sizeof (struct neon_typed_alias));
2212 *reg->neon = *atype;
2213 }
2214 }
2215
2216 /* Look for the .req directive. This is of the form:
2217
2218 new_register_name .req existing_register_name
2219
2220 If we find one, or if it looks sufficiently like one that we want to
2221 handle any error here, return TRUE. Otherwise return FALSE. */
2222
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an already-known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Make a NUL-terminated working copy that we can case-fold below.  */
  nbuf = (char *) alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the folded variant when it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    return TRUE;
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}
2293
2294 /* Create a Neon typed/indexed register alias using directives, e.g.:
2295 X .dn d5.s32[1]
2296 Y .qn 6.s16
2297 Z .dn d7
2298 T .dn Z[0]
2299 These typed registers can be used instead of the types specified after the
2300 Neon mnemonic, so long as all operands given have types. Types can also be
2301 specified directly, e.g.:
2302 vadd d0.s32, d1.s32, d2.s32 */
2303
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  /* Remember the end of the alias name before P is advanced.  */
  nameend = p;

  /* ".dn" defines a D-register alias, ".qn" a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  /* A named base register must match the kind of directive used.  */
  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in terms of D-register pairs, hence
	 the doubling for REG_TYPE_NQ.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and nameend (saved above, before P was
     advanced) points to its end.  If not, then the desired alias name
     is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2443
2444 /* Should never be called, as .req goes between the alias and the
2445 register name, not at the beginning of the line. */
2446
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching this handler means .req appeared at the start of a line
     instead of after an alias name, so reject it.  */
  as_bad (_("invalid syntax for .req directive"));
}
2452
/* Should never be called, as .dn goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2458
/* Should never be called, as .qn goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2464
2465 /* The .unreq directive deletes an alias which was previously defined
2466 by .req. For example:
2467
2468 my_alias .req r11
2469 .unreq my_alias */
2470
2471 static void
2472 s_unreq (int a ATTRIBUTE_UNUSED)
2473 {
2474 char * name;
2475 char saved_char;
2476
2477 name = input_line_pointer;
2478
2479 while (*input_line_pointer != 0
2480 && *input_line_pointer != ' '
2481 && *input_line_pointer != '\n')
2482 ++input_line_pointer;
2483
2484 saved_char = *input_line_pointer;
2485 *input_line_pointer = 0;
2486
2487 if (!*name)
2488 as_bad (_("invalid syntax for .unreq directive"));
2489 else
2490 {
2491 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2492 name);
2493
2494 if (!reg)
2495 as_bad (_("unknown register alias '%s'"), name);
2496 else if (reg->builtin)
2497 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2498 name);
2499 else
2500 {
2501 char * p;
2502 char * nbuf;
2503
2504 hash_delete (arm_reg_hsh, name, FALSE);
2505 free ((char *) reg->name);
2506 if (reg->neon)
2507 free (reg->neon);
2508 free (reg);
2509
2510 /* Also locate the all upper case and all lower case versions.
2511 Do not complain if we cannot find one or the other as it
2512 was probably deleted above. */
2513
2514 nbuf = strdup (name);
2515 for (p = nbuf; *p; p++)
2516 *p = TOUPPER (*p);
2517 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2518 if (reg)
2519 {
2520 hash_delete (arm_reg_hsh, nbuf, FALSE);
2521 free ((char *) reg->name);
2522 if (reg->neon)
2523 free (reg->neon);
2524 free (reg);
2525 }
2526
2527 for (p = nbuf; *p; p++)
2528 *p = TOLOWER (*p);
2529 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2530 if (reg)
2531 {
2532 hash_delete (arm_reg_hsh, nbuf, FALSE);
2533 free ((char *) reg->name);
2534 if (reg->neon)
2535 free (reg->neon);
2536 free (reg);
2537 }
2538
2539 free (nbuf);
2540 }
2541 }
2542
2543 *input_line_pointer = saved_char;
2544 demand_empty_rest_of_line ();
2545 }
2546
2547 /* Directives: Instruction set selection. */
2548
2549 #ifdef OBJ_ELF
2550 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2551 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2553 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2554
2555 /* Create a new mapping symbol for the transition to STATE. */
2556
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for this state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Give $a/$t the matching ARM/Thumb attributes; $d needs neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* A new symbol at the same address supersedes the old one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2630
2631 /* We must sometimes convert a region marked as code to data during
2632 code alignment, if an odd number of bytes have to be padded. The
2633 code mapping symbol is pushed to an aligned address. */
2634
2635 static void
2636 insert_data_mapping_symbol (enum mstate state,
2637 valueT value, fragS *frag, offsetT bytes)
2638 {
2639 /* If there was already a mapping symbol, remove it. */
2640 if (frag->tc_frag_data.last_map != NULL
2641 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2642 {
2643 symbolS *symp = frag->tc_frag_data.last_map;
2644
2645 if (value == 0)
2646 {
2647 know (frag->tc_frag_data.first_map == symp);
2648 frag->tc_frag_data.first_map = NULL;
2649 }
2650 frag->tc_frag_data.last_map = NULL;
2651 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2652 }
2653
2654 make_mapping_symbol (MAP_DATA, value, frag);
2655 make_mapping_symbol (state, value + bytes, frag);
2656 }
2657
2658 static void mapping_state_2 (enum mstate state, int max_chars);
2659
2660 /* Set the mapping state to STATE. Only call this when about to
2661 emit some STATE bytes to the file. */
2662
2663 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2696
2697 /* Same as mapping_state, but MAX_CHARS bytes have already been
2698 allocated. Put the mapping symbol that far back. */
2699
2700 static void
2701 mapping_state_2 (enum mstate state, int max_chars)
2702 {
2703 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2704
2705 if (!SEG_NORMAL (now_seg))
2706 return;
2707
2708 if (mapstate == state)
2709 /* The mapping symbol has already been emitted.
2710 There is nothing else to do. */
2711 return;
2712
2713 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2714 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2715 {
2716 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2717 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2718
2719 if (add_symbol)
2720 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2721 }
2722
2723 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2724 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2725 }
2726 #undef TRANSITION
2727 #else
2728 #define mapping_state(x) ((void)0)
2729 #define mapping_state_2(x, y) ((void)0)
2730 #endif
2731
2732 /* Find the real, Thumb encoded start of a Thumb function. */
2733
2734 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look for a ".real_start_of<name>" stub symbol.  */
  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  /* No stub: warn and fall back to the original symbol.  */
  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2767 #endif
2768
2769 static void
2770 opcode_select (int width)
2771 {
2772 switch (width)
2773 {
2774 case 16:
2775 if (! thumb_mode)
2776 {
2777 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2778 as_bad (_("selected processor does not support THUMB opcodes"));
2779
2780 thumb_mode = 1;
2781 /* No need to force the alignment, since we will have been
2782 coming from ARM mode, which is word-aligned. */
2783 record_alignment (now_seg, 1);
2784 }
2785 break;
2786
2787 case 32:
2788 if (thumb_mode)
2789 {
2790 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2791 as_bad (_("selected processor does not support ARM opcodes"));
2792
2793 thumb_mode = 0;
2794
2795 if (!need_pass_2)
2796 frag_align (2, 0, 0);
2797
2798 record_alignment (now_seg, 1);
2799 }
2800 break;
2801
2802 default:
2803 as_bad (_("invalid instruction size selected (%d)"), width);
2804 }
2805 }
2806
/* Implement the ".arm" directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2813
/* Implement the ".thumb" directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2820
2821 static void
2822 s_code (int unused ATTRIBUTE_UNUSED)
2823 {
2824 int temp;
2825
2826 temp = get_absolute_expression ();
2827 switch (temp)
2828 {
2829 case 16:
2830 case 32:
2831 opcode_select (temp);
2832 break;
2833
2834 default:
2835 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2836 }
2837 }
2838
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2, as opposed to the 1 set by opcode_select, presumably marks
	 the mode as forced without a feature check -- confirm against
	 the consumers of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2855
/* Implement ".thumb_func": switch to Thumb encoding and flag the next
   label as a Thumb function entry point.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2865
2866 /* Perform a .set directive, but also mark the alias as
2867 being a thumb function. */
2868
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  /* The directive is "name, expression"; insist on the comma.  */
  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the target symbol, or create it if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* When EQUIV is non-zero, behave like .equiv: redefining an
     already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2954
2955 /* Directives: Mode selection. */
2956
2957 /* .syntax [unified|divided] - choose the new unified syntax
2958 (same for Arm and Thumb encoding, modulo slight differences in what
2959 can be represented) or the old divergent syntax for each mode. */
2960 static void
2961 s_syntax (int unused ATTRIBUTE_UNUSED)
2962 {
2963 char *name, delim;
2964
2965 delim = get_symbol_name (& name);
2966
2967 if (!strcasecmp (name, "unified"))
2968 unified_syntax = TRUE;
2969 else if (!strcasecmp (name, "divided"))
2970 unified_syntax = FALSE;
2971 else
2972 {
2973 as_bad (_("unrecognized syntax mode \"%s\""), name);
2974 return;
2975 }
2976 (void) restore_line_pointer (delim);
2977 demand_empty_rest_of_line ();
2978 }
2979
2980 /* Directives: sectioning and alignment. */
2981
/* Implement ".bss": switch output to the .bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2994
/* Implement ".even": pad the current section to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3006
3007 /* Directives: CodeComposer Studio. */
3008
3009 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* The directive is accepted and its operands simply discarded.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3018
/* If NAME is not NULL, then it is used for marking the beginning of a
   function, whereas if it is NULL then it means the function end.  */
3021 static void
3022 asmfunc_debug (const char * name)
3023 {
3024 static const char * last_name = NULL;
3025
3026 if (name != NULL)
3027 {
3028 gas_assert (last_name == NULL);
3029 last_name = name;
3030
3031 if (debug_type == DEBUG_STABS)
3032 stabs_generate_asm_func (name, name);
3033 }
3034 else
3035 {
3036 gas_assert (last_name != NULL);
3037
3038 if (debug_type == DEBUG_STABS)
3039 stabs_generate_asm_endfunc (last_name, last_name);
3040
3041 last_name = NULL;
3042 }
3043 }
3044
3045 static void
3046 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3047 {
3048 if (codecomposer_syntax)
3049 {
3050 switch (asmfunc_state)
3051 {
3052 case OUTSIDE_ASMFUNC:
3053 asmfunc_state = WAITING_ASMFUNC_NAME;
3054 break;
3055
3056 case WAITING_ASMFUNC_NAME:
3057 as_bad (_(".asmfunc repeated."));
3058 break;
3059
3060 case WAITING_ENDASMFUNC:
3061 as_bad (_(".asmfunc without function."));
3062 break;
3063 }
3064 demand_empty_rest_of_line ();
3065 }
3066 else
3067 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3068 }
3069
3070 static void
3071 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3072 {
3073 if (codecomposer_syntax)
3074 {
3075 switch (asmfunc_state)
3076 {
3077 case OUTSIDE_ASMFUNC:
3078 as_bad (_(".endasmfunc without a .asmfunc."));
3079 break;
3080
3081 case WAITING_ASMFUNC_NAME:
3082 as_bad (_(".endasmfunc without function."));
3083 break;
3084
3085 case WAITING_ENDASMFUNC:
3086 asmfunc_state = OUTSIDE_ASMFUNC;
3087 asmfunc_debug (NULL);
3088 break;
3089 }
3090 demand_empty_rest_of_line ();
3091 }
3092 else
3093 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3094 }
3095
/* Implement the CCS ".def" directive by treating it like ".global".  */

static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3104
3105 /* Directives: Literal pools. */
3106
3107 static literal_pool *
3108 find_literal_pool (void)
3109 {
3110 literal_pool * pool;
3111
3112 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3113 {
3114 if (pool->section == now_seg
3115 && pool->sub_section == now_subseg)
3116 break;
3117 }
3118
3119 return pool;
3120 }
3121
3122 static literal_pool *
3123 find_or_make_literal_pool (void)
3124 {
3125 /* Next literal pool ID number. */
3126 static unsigned int latest_pool_num = 1;
3127 literal_pool * pool;
3128
3129 pool = find_literal_pool ();
3130
3131 if (pool == NULL)
3132 {
3133 /* Create a new pool. */
3134 pool = (literal_pool *) xmalloc (sizeof (* pool));
3135 if (! pool)
3136 return NULL;
3137
3138 pool->next_free_entry = 0;
3139 pool->section = now_seg;
3140 pool->sub_section = now_subseg;
3141 pool->next = list_of_pools;
3142 pool->symbol = NULL;
3143 pool->alignment = 2;
3144
3145 /* Add it to the list. */
3146 list_of_pools = pool;
3147 }
3148
3149 /* New pools, and emptied pools, will have a NULL symbol. */
3150 if (pool->symbol == NULL)
3151 {
3152 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3153 (valueT) 0, &zero_address_frag);
3154 pool->id = latest_pool_num ++;
3155 }
3156
3157 /* Done. */
3158 return pool;
3159 }
3160
3161 /* Add the literal in the global 'inst'
3162 structure to the relevant literal pool. */
3163
static int
add_to_lit_pool (unsigned int nbytes)
{
/* An entry's X_md field holds its size in the low byte; the next byte
   tags 4-byte alignment-padding slots with PADDING_SLOT.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* Split a 64-bit literal into two 32-bit words, swapped into target
     memory order for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.reloc.exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match either an identical constant...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an identical symbol+offset expression.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal matches an 8-byte aligned pair of existing
	 4-byte entries holding the same two words.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal can re-use an existing padding slot.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: emit a tagged 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two words (already in target memory order).  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the padding slot with this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Redirect the instruction's relocation at the pool symbol, at the
     byte offset where the (new or matching) entry lives.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3324
3325 bfd_boolean
3326 tc_start_label_without_colon (void)
3327 {
3328 bfd_boolean ret = TRUE;
3329
3330 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3331 {
3332 const char *label = input_line_pointer;
3333
3334 while (!is_end_of_line[(int) label[-1]])
3335 --label;
3336
3337 if (*label == '.')
3338 {
3339 as_bad (_("Invalid label '%s'"), label);
3340 ret = FALSE;
3341 }
3342
3343 asmfunc_debug (label);
3344
3345 asmfunc_state = WAITING_ENDASMFUNC;
3346 }
3347
3348 return ret;
3349 }
3350
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
3353
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Keep a private copy of NAME on the notes obstack; the symbol
     refers to that copy, not the caller's buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3401
/* Implements the .ltorg / .pool directives: emit the current literal
   pool, if non-empty, at this point in the output and reset it.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool, or it is empty.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Switch the mapping state to data so disassemblers treat the pool
     contents as data rather than instructions.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The embedded \002 control character presumably keeps this generated
     name distinct from anything a user could write.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the pool's deferred symbol at the pool's final address.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  /* Emit each pooled expression, using the per-entry size stored in
     X_md.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3453
3454 #ifdef OBJ_ELF
3455 /* Forward declarations for functions below, in the MD interface
3456 section. */
3457 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3458 static valueT create_unwind_entry (int);
3459 static void start_unwind_section (const segT, int);
3460 static void add_unwind_opcode (valueT, int);
3461 static void flush_pending_unwind (void);
3462
3463 /* Directives: Data. */
3464
3465 static void
3466 s_arm_elf_cons (int nbytes)
3467 {
3468 expressionS exp;
3469
3470 #ifdef md_flush_pending_output
3471 md_flush_pending_output ();
3472 #endif
3473
3474 if (is_it_end_of_statement ())
3475 {
3476 demand_empty_rest_of_line ();
3477 return;
3478 }
3479
3480 #ifdef md_cons_align
3481 md_cons_align (nbytes);
3482 #endif
3483
3484 mapping_state (MAP_DATA);
3485 do
3486 {
3487 int reloc;
3488 char *base = input_line_pointer;
3489
3490 expression (& exp);
3491
3492 if (exp.X_op != O_symbol)
3493 emit_expr (&exp, (unsigned int) nbytes);
3494 else
3495 {
3496 char *before_reloc = input_line_pointer;
3497 reloc = parse_reloc (&input_line_pointer);
3498 if (reloc == -1)
3499 {
3500 as_bad (_("unrecognized relocation suffix"));
3501 ignore_rest_of_line ();
3502 return;
3503 }
3504 else if (reloc == BFD_RELOC_UNUSED)
3505 emit_expr (&exp, (unsigned int) nbytes);
3506 else
3507 {
3508 reloc_howto_type *howto = (reloc_howto_type *)
3509 bfd_reloc_type_lookup (stdoutput,
3510 (bfd_reloc_code_real_type) reloc);
3511 int size = bfd_get_reloc_size (howto);
3512
3513 if (reloc == BFD_RELOC_ARM_PLT32)
3514 {
3515 as_bad (_("(plt) is only valid on branch targets"));
3516 reloc = BFD_RELOC_UNUSED;
3517 size = 0;
3518 }
3519
3520 if (size > nbytes)
3521 as_bad (_("%s relocations do not fit in %d bytes"),
3522 howto->name, nbytes);
3523 else
3524 {
3525 /* We've parsed an expression stopping at O_symbol.
3526 But there may be more expression left now that we
3527 have parsed the relocation marker. Parse it again.
3528 XXX Surely there is a cleaner way to do this. */
3529 char *p = input_line_pointer;
3530 int offset;
3531 char *save_buf = (char *) alloca (input_line_pointer - base);
3532 memcpy (save_buf, base, input_line_pointer - base);
3533 memmove (base + (input_line_pointer - before_reloc),
3534 base, before_reloc - base);
3535
3536 input_line_pointer = base + (input_line_pointer-before_reloc);
3537 expression (&exp);
3538 memcpy (base, save_buf, p - base);
3539
3540 offset = nbytes - size;
3541 p = frag_more (nbytes);
3542 memset (p, 0, nbytes);
3543 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3544 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3545 }
3546 }
3547 }
3548 }
3549 while (*input_line_pointer++ == ',');
3550
3551 /* Put terminator back into stream. */
3552 input_line_pointer --;
3553 demand_empty_rest_of_line ();
3554 }
3555
3556 /* Emit an expression containing a 32-bit thumb instruction.
3557 Implementation based on put_thumb32_insn. */
3558
3559 static void
3560 emit_thumb32_expr (expressionS * exp)
3561 {
3562 expressionS exp_high = *exp;
3563
3564 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3565 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3566 exp->X_add_number &= 0xffff;
3567 emit_expr (exp, (unsigned int) THUMB_SIZE);
3568 }
3569
/* Guess the size in bytes of a Thumb instruction from its encoding:
   2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 when the size
   cannot be deduced from OPCODE alone.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;
  if (value >= 0xe8000000u)
    return 4;
  return 0;
}
3582
/* Emit the constant expression EXP as one instruction of NBYTES bytes
   (NBYTES == 0 means deduce the Thumb size from the opcode value).
   Used by the .inst family of directives.  Returns TRUE iff something
   was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix was given: guess from the encoding.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit encoding cannot hold a value wider than 16 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent with the
		 hand-inserted instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian 32-bit Thumb insns are emitted as two
		 halfwords, high half first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3627
3628 /* Like s_arm_elf_cons but do not use md_cons_align and
3629 set the mapping state to MAP_ARM/MAP_THUMB. */
3630
3631 static void
3632 s_arm_elf_inst (int nbytes)
3633 {
3634 if (is_it_end_of_statement ())
3635 {
3636 demand_empty_rest_of_line ();
3637 return;
3638 }
3639
3640 /* Calling mapping_state () here will not change ARM/THUMB,
3641 but will ensure not to be in DATA state. */
3642
3643 if (thumb_mode)
3644 mapping_state (MAP_THUMB);
3645 else
3646 {
3647 if (nbytes != 0)
3648 {
3649 as_bad (_("width suffixes are invalid in ARM mode"));
3650 ignore_rest_of_line ();
3651 return;
3652 }
3653
3654 nbytes = 4;
3655
3656 mapping_state (MAP_ARM);
3657 }
3658
3659 do
3660 {
3661 expressionS exp;
3662
3663 expression (& exp);
3664
3665 if (! emit_insn (& exp, nbytes))
3666 {
3667 ignore_rest_of_line ();
3668 return;
3669 }
3670 }
3671 while (*input_line_pointer++ == ',');
3672
3673 /* Put terminator back into stream. */
3674 input_line_pointer --;
3675 demand_empty_rest_of_line ();
3676 }
3677
3678 /* Parse a .rel31 directive. */
3679
3680 static void
3681 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3682 {
3683 expressionS exp;
3684 char *p;
3685 valueT highbit;
3686
3687 highbit = 0;
3688 if (*input_line_pointer == '1')
3689 highbit = 0x80000000;
3690 else if (*input_line_pointer != '0')
3691 as_bad (_("expected 0 or 1"));
3692
3693 input_line_pointer++;
3694 if (*input_line_pointer != ',')
3695 as_bad (_("missing comma"));
3696 input_line_pointer++;
3697
3698 #ifdef md_flush_pending_output
3699 md_flush_pending_output ();
3700 #endif
3701
3702 #ifdef md_cons_align
3703 md_cons_align (4);
3704 #endif
3705
3706 mapping_state (MAP_DATA);
3707
3708 expression (&exp);
3709
3710 p = frag_more (4);
3711 md_number_to_chars (p, highbit, 4);
3712 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3713 BFD_RELOC_ARM_PREL31);
3714
3715 demand_empty_rest_of_line ();
3716 }
3717
3718 /* Directives: AEABI stack-unwind tables. */
3719
3720 /* Parse an unwind_fnstart directive. Simply records the current location. */
3721
3722 static void
3723 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3724 {
3725 demand_empty_rest_of_line ();
3726 if (unwind.proc_start)
3727 {
3728 as_bad (_("duplicate .fnstart directive"));
3729 return;
3730 }
3731
3732 /* Mark the start of the function. */
3733 unwind.proc_start = expr_build_dot ();
3734
3735 /* Reset the rest of the unwind info. */
3736 unwind.opcode_count = 0;
3737 unwind.table_entry = NULL;
3738 unwind.personality_routine = NULL;
3739 unwind.personality_index = -1;
3740 unwind.frame_size = 0;
3741 unwind.fp_offset = 0;
3742 unwind.fp_reg = REG_SP;
3743 unwind.fp_used = 0;
3744 unwind.sp_restored = 0;
3745 }
3746
3747
3748 /* Parse a handlerdata directive. Creates the exception handling table entry
3749 for the function. */
3750
3751 static void
3752 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3753 {
3754 demand_empty_rest_of_line ();
3755 if (!unwind.proc_start)
3756 as_bad (MISSING_FNSTART);
3757
3758 if (unwind.table_entry)
3759 as_bad (_("duplicate .handlerdata directive"));
3760
3761 create_unwind_entry (1);
3762 }
3763
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the unwind index section, the first a PREL31 reference
   to the function start, the second either an inline unwind entry or
   a PREL31 reference to the table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when the whole entry fits
     inline in the index table's second word.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the two words just allocated.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized BFD_RELOC_NONE fixup records the reference
	 without changing any output bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3833
3834
3835 /* Parse an unwind_cantunwind directive. */
3836
3837 static void
3838 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3839 {
3840 demand_empty_rest_of_line ();
3841 if (!unwind.proc_start)
3842 as_bad (MISSING_FNSTART);
3843
3844 if (unwind.personality_routine || unwind.personality_index != -1)
3845 as_bad (_("personality routine specified for cantunwind frame"));
3846
3847 unwind.personality_index = -2;
3848 }
3849
3850
3851 /* Parse a personalityindex directive. */
3852
3853 static void
3854 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3855 {
3856 expressionS exp;
3857
3858 if (!unwind.proc_start)
3859 as_bad (MISSING_FNSTART);
3860
3861 if (unwind.personality_routine || unwind.personality_index != -1)
3862 as_bad (_("duplicate .personalityindex directive"));
3863
3864 expression (&exp);
3865
3866 if (exp.X_op != O_constant
3867 || exp.X_add_number < 0 || exp.X_add_number > 15)
3868 {
3869 as_bad (_("bad personality routine number"));
3870 ignore_rest_of_line ();
3871 return;
3872 }
3873
3874 unwind.personality_index = exp.X_add_number;
3875
3876 demand_empty_rest_of_line ();
3877 }
3878
3879
/* Parse a personality directive: record the named symbol as this
   function's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the (possibly quoted) symbol name.  C is the character that
     terminated it, which get_symbol_name replaced with a NUL.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put the terminator character back.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3901
3902
/* Parse a directive saving core registers, e.g.
   ".unwind_save {r4-r7, lr}", and emit the matching unwind opcodes.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask: bit N set means rN is in the list.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      /* Swap the ip bit (0x1000) for the sp bit (0x2000).  */
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: 4 per saved register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3978
3979
3980 /* Parse a directive saving FPA registers. */
3981
3982 static void
3983 s_arm_unwind_save_fpa (int reg)
3984 {
3985 expressionS exp;
3986 int num_regs;
3987 valueT op;
3988
3989 /* Get Number of registers to transfer. */
3990 if (skip_past_comma (&input_line_pointer) != FAIL)
3991 expression (&exp);
3992 else
3993 exp.X_op = O_illegal;
3994
3995 if (exp.X_op != O_constant)
3996 {
3997 as_bad (_("expected , <constant>"));
3998 ignore_rest_of_line ();
3999 return;
4000 }
4001
4002 num_regs = exp.X_add_number;
4003
4004 if (num_regs < 1 || num_regs > 4)
4005 {
4006 as_bad (_("number of registers must be in the range [1:4]"));
4007 ignore_rest_of_line ();
4008 return;
4009 }
4010
4011 demand_empty_rest_of_line ();
4012
4013 if (reg == 4)
4014 {
4015 /* Short form. */
4016 op = 0xb4 | (num_regs - 1);
4017 add_unwind_opcode (op, 1);
4018 }
4019 else
4020 {
4021 /* Long form. */
4022 op = 0xc800 | (reg << 4) | (num_regs - 1);
4023 add_unwind_opcode (op, 2);
4024 }
4025 unwind.frame_size += num_regs * 12;
4026 }
4027
4028
/* Parse a directive saving VFP registers for ARMv6 and above.
   Splits the register list at D16: D16-D31 need the VFPv3 opcode
   (0xc8), D0-D15 use the 0xc9 opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* D16 corresponds to offset 0 in the 0xc8 opcode's register field.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register takes 8 bytes of stack.  */
  unwind.frame_size += count * 8;
}
4077
4078
4079 /* Parse a directive saving VFP registers for pre-ARMv6. */
4080
4081 static void
4082 s_arm_unwind_save_vfp (void)
4083 {
4084 int count;
4085 unsigned int reg;
4086 valueT op;
4087
4088 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4089 if (count == FAIL)
4090 {
4091 as_bad (_("expected register list"));
4092 ignore_rest_of_line ();
4093 return;
4094 }
4095
4096 demand_empty_rest_of_line ();
4097
4098 if (reg == 8)
4099 {
4100 /* Short form. */
4101 op = 0xb8 | (count - 1);
4102 add_unwind_opcode (op, 1);
4103 }
4104 else
4105 {
4106 /* Long form. */
4107 op = 0xb300 | (reg << 4) | (count - 1);
4108 add_unwind_opcode (op, 2);
4109 }
4110 unwind.frame_size += count * 8 + 4;
4111 }
4112
4113
4114 /* Parse a directive saving iWMMXt data registers. */
4115
4116 static void
4117 s_arm_unwind_save_mmxwr (void)
4118 {
4119 int reg;
4120 int hi_reg;
4121 int i;
4122 unsigned mask = 0;
4123 valueT op;
4124
4125 if (*input_line_pointer == '{')
4126 input_line_pointer++;
4127
4128 do
4129 {
4130 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4131
4132 if (reg == FAIL)
4133 {
4134 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4135 goto error;
4136 }
4137
4138 if (mask >> reg)
4139 as_tsktsk (_("register list not in ascending order"));
4140 mask |= 1 << reg;
4141
4142 if (*input_line_pointer == '-')
4143 {
4144 input_line_pointer++;
4145 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4146 if (hi_reg == FAIL)
4147 {
4148 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4149 goto error;
4150 }
4151 else if (reg >= hi_reg)
4152 {
4153 as_bad (_("bad register range"));
4154 goto error;
4155 }
4156 for (; reg < hi_reg; reg++)
4157 mask |= 1 << reg;
4158 }
4159 }
4160 while (skip_past_comma (&input_line_pointer) != FAIL);
4161
4162 skip_past_char (&input_line_pointer, '}');
4163
4164 demand_empty_rest_of_line ();
4165
4166 /* Generate any deferred opcodes because we're going to be looking at
4167 the list. */
4168 flush_pending_unwind ();
4169
4170 for (i = 0; i < 16; i++)
4171 {
4172 if (mask & (1 << i))
4173 unwind.frame_size += 8;
4174 }
4175
4176 /* Attempt to combine with a previous opcode. We do this because gcc
4177 likes to output separate unwind directives for a single block of
4178 registers. */
4179 if (unwind.opcode_count > 0)
4180 {
4181 i = unwind.opcodes[unwind.opcode_count - 1];
4182 if ((i & 0xf8) == 0xc0)
4183 {
4184 i &= 7;
4185 /* Only merge if the blocks are contiguous. */
4186 if (i < 6)
4187 {
4188 if ((mask & 0xfe00) == (1 << 9))
4189 {
4190 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4191 unwind.opcode_count--;
4192 }
4193 }
4194 else if (i == 6 && unwind.opcode_count >= 2)
4195 {
4196 i = unwind.opcodes[unwind.opcode_count - 2];
4197 reg = i >> 4;
4198 i &= 0xf;
4199
4200 op = 0xffff << (reg - 1);
4201 if (reg > 0
4202 && ((mask & op) == (1u << (reg - 1))))
4203 {
4204 op = (1 << (reg + i + 1)) - 1;
4205 op &= ~((1 << reg) - 1);
4206 mask |= op;
4207 unwind.opcode_count -= 2;
4208 }
4209 }
4210 }
4211 }
4212
4213 hi_reg = 15;
4214 /* We want to generate opcodes in the order the registers have been
4215 saved, ie. descending order. */
4216 for (reg = 15; reg >= -1; reg--)
4217 {
4218 /* Save registers in blocks. */
4219 if (reg < 0
4220 || !(mask & (1 << reg)))
4221 {
4222 /* We found an unsaved reg. Generate opcodes to save the
4223 preceding block. */
4224 if (reg != hi_reg)
4225 {
4226 if (reg == 9)
4227 {
4228 /* Short form. */
4229 op = 0xc0 | (hi_reg - 10);
4230 add_unwind_opcode (op, 1);
4231 }
4232 else
4233 {
4234 /* Long form. */
4235 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4236 add_unwind_opcode (op, 2);
4237 }
4238 }
4239 hi_reg = reg - 1;
4240 }
4241 }
4242
4243 return;
4244 error:
4245 ignore_rest_of_line ();
4246 }
4247
4248 static void
4249 s_arm_unwind_save_mmxwcg (void)
4250 {
4251 int reg;
4252 int hi_reg;
4253 unsigned mask = 0;
4254 valueT op;
4255
4256 if (*input_line_pointer == '{')
4257 input_line_pointer++;
4258
4259 skip_whitespace (input_line_pointer);
4260
4261 do
4262 {
4263 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4264
4265 if (reg == FAIL)
4266 {
4267 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4268 goto error;
4269 }
4270
4271 reg -= 8;
4272 if (mask >> reg)
4273 as_tsktsk (_("register list not in ascending order"));
4274 mask |= 1 << reg;
4275
4276 if (*input_line_pointer == '-')
4277 {
4278 input_line_pointer++;
4279 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4280 if (hi_reg == FAIL)
4281 {
4282 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4283 goto error;
4284 }
4285 else if (reg >= hi_reg)
4286 {
4287 as_bad (_("bad register range"));
4288 goto error;
4289 }
4290 for (; reg < hi_reg; reg++)
4291 mask |= 1 << reg;
4292 }
4293 }
4294 while (skip_past_comma (&input_line_pointer) != FAIL);
4295
4296 skip_past_char (&input_line_pointer, '}');
4297
4298 demand_empty_rest_of_line ();
4299
4300 /* Generate any deferred opcodes because we're going to be looking at
4301 the list. */
4302 flush_pending_unwind ();
4303
4304 for (reg = 0; reg < 16; reg++)
4305 {
4306 if (mask & (1 << reg))
4307 unwind.frame_size += 4;
4308 }
4309 op = 0xc700 | mask;
4310 add_unwind_opcode (op, 2);
4311 return;
4312 error:
4313 ignore_rest_of_line ();
4314 }
4315
4316
4317 /* Parse an unwind_save directive.
4318 If the argument is non-zero, this is a .vsave directive. */
4319
4320 static void
4321 s_arm_unwind_save (int arch_v6)
4322 {
4323 char *peek;
4324 struct reg_entry *reg;
4325 bfd_boolean had_brace = FALSE;
4326
4327 if (!unwind.proc_start)
4328 as_bad (MISSING_FNSTART);
4329
4330 /* Figure out what sort of save we have. */
4331 peek = input_line_pointer;
4332
4333 if (*peek == '{')
4334 {
4335 had_brace = TRUE;
4336 peek++;
4337 }
4338
4339 reg = arm_reg_parse_multi (&peek);
4340
4341 if (!reg)
4342 {
4343 as_bad (_("register expected"));
4344 ignore_rest_of_line ();
4345 return;
4346 }
4347
4348 switch (reg->type)
4349 {
4350 case REG_TYPE_FN:
4351 if (had_brace)
4352 {
4353 as_bad (_("FPA .unwind_save does not take a register list"));
4354 ignore_rest_of_line ();
4355 return;
4356 }
4357 input_line_pointer = peek;
4358 s_arm_unwind_save_fpa (reg->number);
4359 return;
4360
4361 case REG_TYPE_RN:
4362 s_arm_unwind_save_core ();
4363 return;
4364
4365 case REG_TYPE_VFD:
4366 if (arch_v6)
4367 s_arm_unwind_save_vfp_armv6 ();
4368 else
4369 s_arm_unwind_save_vfp ();
4370 return;
4371
4372 case REG_TYPE_MMXWR:
4373 s_arm_unwind_save_mmxwr ();
4374 return;
4375
4376 case REG_TYPE_MMXWCG:
4377 s_arm_unwind_save_mmxwcg ();
4378 return;
4379
4380 default:
4381 as_bad (_(".unwind_save does not support this kind of register"));
4382 ignore_rest_of_line ();
4383 }
4384 }
4385
4386
4387 /* Parse an unwind_movsp directive. */
4388
4389 static void
4390 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4391 {
4392 int reg;
4393 valueT op;
4394 int offset;
4395
4396 if (!unwind.proc_start)
4397 as_bad (MISSING_FNSTART);
4398
4399 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4400 if (reg == FAIL)
4401 {
4402 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4403 ignore_rest_of_line ();
4404 return;
4405 }
4406
4407 /* Optional constant. */
4408 if (skip_past_comma (&input_line_pointer) != FAIL)
4409 {
4410 if (immediate_for_directive (&offset) == FAIL)
4411 return;
4412 }
4413 else
4414 offset = 0;
4415
4416 demand_empty_rest_of_line ();
4417
4418 if (reg == REG_SP || reg == REG_PC)
4419 {
4420 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4421 return;
4422 }
4423
4424 if (unwind.fp_reg != REG_SP)
4425 as_bad (_("unexpected .unwind_movsp directive"));
4426
4427 /* Generate opcode to restore the value. */
4428 op = 0x90 | reg;
4429 add_unwind_opcode (op, 1);
4430
4431 /* Record the information for later. */
4432 unwind.fp_reg = reg;
4433 unwind.fp_offset = unwind.frame_size - offset;
4434 unwind.sp_restored = 1;
4435 }
4436
4437 /* Parse an unwind_pad directive. */
4438
4439 static void
4440 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4441 {
4442 int offset;
4443
4444 if (!unwind.proc_start)
4445 as_bad (MISSING_FNSTART);
4446
4447 if (immediate_for_directive (&offset) == FAIL)
4448 return;
4449
4450 if (offset & 3)
4451 {
4452 as_bad (_("stack increment must be multiple of 4"));
4453 ignore_rest_of_line ();
4454 return;
4455 }
4456
4457 /* Don't generate any opcodes, just record the details for later. */
4458 unwind.frame_size += offset;
4459 unwind.pending_offset += offset;
4460
4461 demand_empty_rest_of_line ();
4462 }
4463
4464 /* Parse an unwind_setfp directive. */
4465
4466 static void
4467 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4468 {
4469 int sp_reg;
4470 int fp_reg;
4471 int offset;
4472
4473 if (!unwind.proc_start)
4474 as_bad (MISSING_FNSTART);
4475
4476 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4477 if (skip_past_comma (&input_line_pointer) == FAIL)
4478 sp_reg = FAIL;
4479 else
4480 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4481
4482 if (fp_reg == FAIL || sp_reg == FAIL)
4483 {
4484 as_bad (_("expected <reg>, <reg>"));
4485 ignore_rest_of_line ();
4486 return;
4487 }
4488
4489 /* Optional constant. */
4490 if (skip_past_comma (&input_line_pointer) != FAIL)
4491 {
4492 if (immediate_for_directive (&offset) == FAIL)
4493 return;
4494 }
4495 else
4496 offset = 0;
4497
4498 demand_empty_rest_of_line ();
4499
4500 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4501 {
4502 as_bad (_("register must be either sp or set by a previous"
4503 "unwind_movsp directive"));
4504 return;
4505 }
4506
4507 /* Don't generate any opcodes, just record the information for later. */
4508 unwind.fp_reg = fp_reg;
4509 unwind.fp_used = 1;
4510 if (sp_reg == REG_SP)
4511 unwind.fp_offset = unwind.frame_size - offset;
4512 else
4513 unwind.fp_offset -= offset;
4514 }
4515
4516 /* Parse an unwind_raw directive. */
4517
4518 static void
4519 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4520 {
4521 expressionS exp;
4522 /* This is an arbitrary limit. */
4523 unsigned char op[16];
4524 int count;
4525
4526 if (!unwind.proc_start)
4527 as_bad (MISSING_FNSTART);
4528
4529 expression (&exp);
4530 if (exp.X_op == O_constant
4531 && skip_past_comma (&input_line_pointer) != FAIL)
4532 {
4533 unwind.frame_size += exp.X_add_number;
4534 expression (&exp);
4535 }
4536 else
4537 exp.X_op = O_illegal;
4538
4539 if (exp.X_op != O_constant)
4540 {
4541 as_bad (_("expected <offset>, <opcode>"));
4542 ignore_rest_of_line ();
4543 return;
4544 }
4545
4546 count = 0;
4547
4548 /* Parse the opcode. */
4549 for (;;)
4550 {
4551 if (count >= 16)
4552 {
4553 as_bad (_("unwind opcode too long"));
4554 ignore_rest_of_line ();
4555 }
4556 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4557 {
4558 as_bad (_("invalid unwind opcode"));
4559 ignore_rest_of_line ();
4560 return;
4561 }
4562 op[count++] = exp.X_add_number;
4563
4564 /* Parse the next byte. */
4565 if (skip_past_comma (&input_line_pointer) == FAIL)
4566 break;
4567
4568 expression (&exp);
4569 }
4570
4571 /* Add the opcode bytes in reverse order. */
4572 while (count--)
4573 add_unwind_opcode (op[count], 1);
4574
4575 demand_empty_rest_of_line ();
4576 }
4577
4578
4579 /* Parse a .eabi_attribute directive. */
4580
4581 static void
4582 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4583 {
4584 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4585
4586 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4587 attributes_set_explicitly[tag] = 1;
4588 }
4589
/* Emit a tls fix for the symbol: attach a TLS descriptor-sequence
   relocation at the current output position, without emitting any
   bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current end of the frag's obstack, i.e. the position the
     fixup applies to.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4613 #endif /* OBJ_ELF */
4614
4615 static void s_arm_arch (int);
4616 static void s_arm_object_arch (int);
4617 static void s_arm_cpu (int);
4618 static void s_arm_fpu (int);
4619 static void s_arm_arch_extension (int);
4620
4621 #ifdef TE_PE
4622
4623 static void
4624 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4625 {
4626 expressionS exp;
4627
4628 do
4629 {
4630 expression (&exp);
4631 if (exp.X_op == O_symbol)
4632 exp.X_op = O_secrel;
4633
4634 emit_expr (&exp, 4);
4635 }
4636 while (*input_line_pointer++ == ',');
4637
4638 input_line_pointer--;
4639 demand_empty_rest_of_line ();
4640 }
4641 #endif /* TE_PE */
4642
4643 /* This table describes all the machine specific pseudo-ops the assembler
4644 has to support. The fields are:
4645 pseudo-op name without dot
4646 function to call to execute this pseudo-op
4647 Integer arg to pass to the function. */
4648
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  /* Instruction-set state directives.  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Target-selection directives.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* ARM EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4722 \f
4723 /* Parser functions used exclusively in instruction operands. */
4724
4725 /* Generic immediate-value read function for use in insn parsing.
4726 STR points to the beginning of the immediate (the leading #);
4727 VAL receives the value; if the value is outside [MIN, MAX]
4728 issue an error. PREFIX_OPT is true if the immediate prefix is
4729 optional. */
4730
4731 static int
4732 parse_immediate (char **str, int *val, int min, int max,
4733 bfd_boolean prefix_opt)
4734 {
4735 expressionS exp;
4736 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4737 if (exp.X_op != O_constant)
4738 {
4739 inst.error = _("constant expression required");
4740 return FAIL;
4741 }
4742
4743 if (exp.X_add_number < min || exp.X_add_number > max)
4744 {
4745 inst.error = _("immediate value out of range");
4746 return FAIL;
4747 }
4748
4749 *val = exp.X_add_number;
4750 return SUCCESS;
4751 }
4752
4753 /* Less-generic immediate-value read function with the possibility of loading a
4754 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4755 instructions. Puts the result directly in inst.operands[i]. */
4756
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  /* Parse into the caller's expression if one was supplied, otherwise a
     local scratch one.  */
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm; the high 32 bits (if any) go in .reg.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in
	 generic_bignum.  PARTS is how many littlenums make up 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* Every littlenum above the low 64 bits must repeat its
	     predecessor, i.e. be pure sign extension.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4825
4826 /* Returns the pseudo-register number of an FPA immediate constant,
4827 or FAIL if there isn't a valid constant here. */
4828
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* Offset by 8 — presumably the first FPA constant
	       pseudo-register; same offset is used below.  */
	    return i + 8;
	  /* Partial match only; restore the scan position.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Success: advance *str and restore the saved
		     input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Failure: restore input_line_pointer (it was borrowed for
     expression ()) and report the error.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4918
4919 /* Returns 1 if a number has "quarter-precision" float format
4920 0baBbbbbbc defgh000 00000000 00000000. */
4921
4922 static int
4923 is_quarter_float (unsigned imm)
4924 {
4925 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4926 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4927 }
4928
4929
4930 /* Detect the presence of a floating point or integer zero constant,
4931 i.e. #0.0 or #0. */
4932
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* Accept only a positive zero: sign must be '+', and low > leader
     indicates no significant littlenums were produced (per atof-generic's
     representation), i.e. the parsed value is zero.  Note that -0.0 is
     thus rejected here.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4963
4964 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4965 0baBbbbbbc defgh000 00000000 00000000.
4966 The zero and minus-zero cases need special handling, since they can't be
4967 encoded in the "quarter-precision" float format, but can nonetheless be
4968 loaded as integer constants. */
4969
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  /* A hex literal is always an integer, never a float.  */
  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan to the end of the token looking for a decimal point or
	 exponent marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a proper quarter-precision pattern, or +0.0/-0.0
	 (all bits clear apart from possibly the sign bit).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5027
/* Shift operands.  */

/* The shift kinds accepted in shifter operands.  SHIFT_RRX (rotate
   right with extend) takes no shift amount — see parse_shift.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic to its kind; used as the payload of
   the arm_shift_hsh hash table consulted by parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5049
5050 /* Parse a <shift> specifier on an ARM data processing instruction.
5051 This has three forms:
5052
5053 (LSL|LSR|ASL|ASR|ROR) Rs
5054 (LSL|LSR|ASL|ASR|ROR) #imm
5055 RRX
5056
5057 Note that ASL is assimilated to LSL in the instruction encoding, and
5058 RRX to ROR #0 (which cannot be written as such). */
5059
static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan the alphabetic shift mnemonic (e.g. "lsl", "ror", "rrx").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  /* Look the mnemonic up (case-insensitively, via the prebuilt hash).  */
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce MODE's restriction on which shift kinds are acceptable.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; for all other kinds parse a register (when
     allowed) or an immediate shift amount into inst.reloc.exp.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5140
5141 /* Parse a <shifter_operand> for an ARM data processing instruction:
5142
5143 #<immediate>
5144 #<immediate>, <rotate>
5145 <Rm>
5146 <Rm>, <shift>
5147
5148 where <shift> is defined by parse_shift above, and <rotate> is a
5149 multiple of 2 between 0 and 30. Validation of immediate operands
5150 is deferred to md_apply_fix. */
5151
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> optionally followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: the value lands in inst.reloc.exp.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30] and the base
	 constant an 8-bit value, matching the ARM immediate field.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  For an even VALUE, VALUE << 7 equals
	 (VALUE / 2) << 8, i.e. the rotation count in the rotate field
	 above the 8-bit immediate.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* No explicit rotation: leave encoding to md_apply_fix via reloc.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5210
5211 /* Group relocation information. Each entry in the table contains the
5212 textual name of the relocation as may appear in assembler source
5213 and must end with a colon.
5214 Along with this textual name are the relocation codes to be used if
5215 the corresponding instruction is an ALU instruction (ADD or SUB only),
5216 an LDR, an LDRS, or an LDC. */
5217
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source (no colon).  */
  int alu_code;		/* BFD_RELOC_* for ADD/SUB, or 0 if not allowed.  */
  int ldr_code;		/* BFD_RELOC_* for LDR, or 0 if not allowed.  */
  int ldrs_code;	/* BFD_RELOC_* for LDRS, or 0 if not allowed.  */
  int ldc_code;		/* BFD_RELOC_* for LDC, or 0 if not allowed.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5235
/* A code of 0 in any of the LDR/LDRS/LDC columns means that variety of
   group relocation is not permitted for the named relocation; callers
   (e.g. parse_address_main) diagnose that case.  */
static struct group_reloc_table_entry group_reloc_table[] =
{ /* Program counter relative: */
  { "pc_g0_nc",
    BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g0",
    BFD_RELOC_ARM_ALU_PC_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
  { "pc_g1_nc",
    BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "pc_g1",
    BFD_RELOC_ARM_ALU_PC_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
  { "pc_g2",
    BFD_RELOC_ARM_ALU_PC_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_PC_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_PC_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
  /* Section base relative */
  { "sb_g0_nc",
    BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g0",
    BFD_RELOC_ARM_ALU_SB_G0,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G0,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G0,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
  { "sb_g1_nc",
    BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
    0,				/* LDR */
    0,				/* LDRS */
    0 },			/* LDC */
  { "sb_g1",
    BFD_RELOC_ARM_ALU_SB_G1,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G1,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G1,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
  { "sb_g2",
    BFD_RELOC_ARM_ALU_SB_G2,	/* ALU */
    BFD_RELOC_ARM_LDR_SB_G2,	/* LDR */
    BFD_RELOC_ARM_LDRS_SB_G2,	/* LDRS */
    BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
  /* Absolute thumb alu relocations.  */
  { "lower0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "lower8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper0_7",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 },			/* LDC.  */
  { "upper8_15",
    BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
    0,				/* LDR.  */
    0,				/* LDRS.  */
    0 } };			/* LDC.  */
5310
5311 /* Given the address of a pointer pointing to the textual name of a group
5312 relocation as may appear in assembler source, attempt to find its details
5313 in group_reloc_table. The pointer will be updated to the character after
5314 the trailing colon. On failure, FAIL will be returned; SUCCESS
5315 otherwise. On success, *entry will be updated to point at the relevant
5316 group_reloc_table entry. */
5317
5318 static int
5319 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5320 {
5321 unsigned int i;
5322 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5323 {
5324 int length = strlen (group_reloc_table[i].name);
5325
5326 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5327 && (*str)[length] == ':')
5328 {
5329 *out = &group_reloc_table[i];
5330 *str += (length + 1);
5331 return SUCCESS;
5332 }
5333 }
5334
5335 return FAIL;
5336 }
5337
5338 /* Parse a <shifter_operand> for an ARM data processing instruction
5339 (as for parse_shifter_operand) where group relocations are allowed:
5340
5341 #<immediate>
5342 #<immediate>, <rotate>
5343 #:<group_reloc>:<expression>
5344 <Rm>
5345 <Rm>, <shift>
5346
5347 where <group_reloc> is one of the strings defined in group_reloc_table.
5348 The hashes are optional.
5349
5350 Everything else is as for parse_shifter_operand. */
5351
5352 static parse_operand_result
5353 parse_shifter_operand_group_reloc (char **str, int i)
5354 {
5355 /* Determine if we have the sequence of characters #: or just :
5356 coming next. If we do, then we check for a group relocation.
5357 If we don't, punt the whole lot to parse_shifter_operand. */
5358
5359 if (((*str)[0] == '#' && (*str)[1] == ':')
5360 || (*str)[0] == ':')
5361 {
5362 struct group_reloc_table_entry *entry;
5363
5364 if ((*str)[0] == '#')
5365 (*str) += 2;
5366 else
5367 (*str)++;
5368
5369 /* Try to parse a group relocation. Anything else is an error. */
5370 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5371 {
5372 inst.error = _("unknown group relocation");
5373 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5374 }
5375
5376 /* We now have the group relocation table entry corresponding to
5377 the name in the assembler source. Next, we parse the expression. */
5378 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5379 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5380
5381 /* Record the relocation type (always the ALU variant here). */
5382 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5383 gas_assert (inst.reloc.type != 0);
5384
5385 return PARSE_OPERAND_SUCCESS;
5386 }
5387 else
5388 return parse_shifter_operand (str, i) == SUCCESS
5389 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5390
5391 /* Never reached. */
5392 }
5393
5394 /* Parse a Neon alignment expression. Information is written to
5395 inst.operands[i]. We assume the initial ':' has been skipped.
5396
5397 align .imm = align << 8, .immisalign=1, .preind=0 */
5398 static parse_operand_result
5399 parse_neon_alignment (char **str, int i)
5400 {
5401 char *p = *str;
5402 expressionS exp;
5403
5404 my_get_expression (&exp, &p, GE_NO_PREFIX);
5405
5406 if (exp.X_op != O_constant)
5407 {
5408 inst.error = _("alignment must be constant");
5409 return PARSE_OPERAND_FAIL;
5410 }
5411
5412 inst.operands[i].imm = exp.X_add_number << 8;
5413 inst.operands[i].immisalign = 1;
5414 /* Alignments are not pre-indexes. */
5415 inst.operands[i].preind = 0;
5416
5417 *str = p;
5418 return PARSE_OPERAND_SUCCESS;
5419 }
5420
5421 /* Parse all forms of an ARM address expression. Information is written
5422 to inst.operands[i] and/or inst.reloc.
5423
5424 Preindexed addressing (.preind=1):
5425
5426 [Rn, #offset] .reg=Rn .reloc.exp=offset
5427 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5429 .shift_kind=shift .reloc.exp=shift_imm
5430
5431 These three may have a trailing ! which causes .writeback to be set also.
5432
5433 Postindexed addressing (.postind=1, .writeback=1):
5434
5435 [Rn], #offset .reg=Rn .reloc.exp=offset
5436 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5438 .shift_kind=shift .reloc.exp=shift_imm
5439
5440 Unindexed addressing (.preind=0, .postind=0):
5441
5442 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5443
5444 Other:
5445
5446 [Rn]{!} shorthand for [Rn,#0]{!}
5447 =immediate .isreg=0 .reloc.exp=immediate
5448 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5449
5450 It is the caller's responsibility to check for addressing modes not
5451 supported by the instruction, and to set inst.reloc.type. */
5452
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either a bare label or an '='-prefixed literal-pool load.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register Rn is mandatory inside '[...]'.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma after Rn introduces a pre-indexed offset.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      /* Register offset, optionally shifted: [Rn, +/-Rm {, shift}].  */
      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: undo the consumed '-' so the expression
	     parser sees the full signed immediate.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in group_reloc_table marks a disallowed
		 combination.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' selects writeback; a trailing comma starts a
     post-indexed (or unindexed option) operand.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* Undo the consumed '-' before re-parsing as an
		 immediate, as in the pre-indexed case above.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5704
5705 static int
5706 parse_address (char **str, int i)
5707 {
5708 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5709 ? SUCCESS : FAIL;
5710 }
5711
5712 static parse_operand_result
5713 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5714 {
5715 return parse_address_main (str, i, 1, type);
5716 }
5717
5718 /* Parse an operand for a MOVW or MOVT instruction. */
5719 static int
5720 parse_half (char **str)
5721 {
5722 char * p;
5723
5724 p = *str;
5725 skip_past_char (&p, '#');
5726 if (strncasecmp (p, ":lower16:", 9) == 0)
5727 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5728 else if (strncasecmp (p, ":upper16:", 9) == 0)
5729 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5730
5731 if (inst.reloc.type != BFD_RELOC_UNUSED)
5732 {
5733 p += 9;
5734 skip_whitespace (p);
5735 }
5736
5737 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5738 return FAIL;
5739
5740 if (inst.reloc.type == BFD_RELOC_UNUSED)
5741 {
5742 if (inst.reloc.exp.X_op != O_constant)
5743 {
5744 inst.error = _("constant expression expected");
5745 return FAIL;
5746 }
5747 if (inst.reloc.exp.X_add_number < 0
5748 || inst.reloc.exp.X_add_number > 0xffff)
5749 {
5750 inst.error = _("immediate value out of range");
5751 return FAIL;
5752 }
5753 }
5754 *str = p;
5755 return SUCCESS;
5756 }
5757
/* Miscellaneous. */

/* Parse a PSR flag operand. The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (a write
   to the PSR); in that case the PSR_f mask bits may be added for
   unadorned or M-profile register names.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants. */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase. This is just a convenience
     feature for ease of use and backwards compatibility. */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      /* SPSR/CPSR are A/R-profile banked registers; reject on M-profile.  */
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs. */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole name up in the v7m special-register
	 table.  First gather the identifier characters.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR family, only match up to and including the final
	 'r'/'R' so any "_<bits>" suffix is left for check_suffix.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified. Note that
	 APSR itself is handled above. */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already). Do that by setting the PSR_f field
	 here. */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip the four characters of "SPSR"/"CPSR"/"APSR".  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows. */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields. */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; 0x20 (or 0x2 for 'g') acts as
	     a sentinel marking a repeated letter, rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q together map to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject repeated letters and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR: look the field-name suffix up in the PSR table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
	 is deprecated, but allow it anyway. */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here). */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5956
5957 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5958 value suitable for splatting into the AIF field of the instruction. */
5959
5960 static int
5961 parse_cps_flags (char **str)
5962 {
5963 int val = 0;
5964 int saw_a_flag = 0;
5965 char *s = *str;
5966
5967 for (;;)
5968 switch (*s++)
5969 {
5970 case '\0': case ',':
5971 goto done;
5972
5973 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5974 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5975 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5976
5977 default:
5978 inst.error = _("unrecognized CPS flag");
5979 return FAIL;
5980 }
5981
5982 done:
5983 if (saw_a_flag == 0)
5984 {
5985 inst.error = _("missing CPS flags");
5986 return FAIL;
5987 }
5988
5989 *str = s - 1;
5990 return val;
5991 }
5992
5993 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5994 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5995
5996 static int
5997 parse_endian_specifier (char **str)
5998 {
5999 int little_endian;
6000 char *s = *str;
6001
6002 if (strncasecmp (s, "BE", 2))
6003 little_endian = 0;
6004 else if (strncasecmp (s, "LE", 2))
6005 little_endian = 1;
6006 else
6007 {
6008 inst.error = _("valid endian specifiers are be or le");
6009 return FAIL;
6010 }
6011
6012 if (ISALNUM (s[2]) || s[2] == '_')
6013 {
6014 inst.error = _("valid endian specifiers are be or le");
6015 return FAIL;
6016 }
6017
6018 *str = s + 2;
6019 return little_endian;
6020 }
6021
6022 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6023 value suitable for poking into the rotate field of an sxt or sxta
6024 instruction, or FAIL on error. */
6025
6026 static int
6027 parse_ror (char **str)
6028 {
6029 int rot;
6030 char *s = *str;
6031
6032 if (strncasecmp (s, "ROR", 3) == 0)
6033 s += 3;
6034 else
6035 {
6036 inst.error = _("missing rotation field after comma");
6037 return FAIL;
6038 }
6039
6040 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6041 return FAIL;
6042
6043 switch (rot)
6044 {
6045 case 0: *str = s; return 0x0;
6046 case 8: *str = s; return 0x1;
6047 case 16: *str = s; return 0x2;
6048 case 24: *str = s; return 0x3;
6049
6050 default:
6051 inst.error = _("rotation can only be 0, 8, 16, or 24");
6052 return FAIL;
6053 }
6054 }
6055
6056 /* Parse a conditional code (from conds[] below). The value returned is in the
6057 range 0 .. 14, or FAIL. */
6058 static int
6059 parse_cond (char **str)
6060 {
6061 char *q;
6062 const struct asm_cond *c;
6063 int n;
6064 /* Condition codes are always 2 characters, so matching up to
6065 3 characters is sufficient. */
6066 char cond[3];
6067
6068 q = *str;
6069 n = 0;
6070 while (ISALPHA (*q) && n < 3)
6071 {
6072 cond[n] = TOLOWER (*q);
6073 q++;
6074 n++;
6075 }
6076
6077 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6078 if (!c)
6079 {
6080 inst.error = _("condition required");
6081 return FAIL;
6082 }
6083
6084 *str = q;
6085 return c->value;
6086 }
6087
6088 /* If the given feature available in the selected CPU, mark it as used.
6089 Returns TRUE iff feature is available. */
6090 static bfd_boolean
6091 mark_feature_used (const arm_feature_set *feature)
6092 {
6093 /* Ensure the option is valid on the current architecture. */
6094 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6095 return FALSE;
6096
6097 /* Add the appropriate architecture feature for the barrier option used.
6098 */
6099 if (thumb_mode)
6100 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6101 else
6102 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6103
6104 return TRUE;
6105 }
6106
6107 /* Parse an option for a barrier instruction. Returns the encoding for the
6108 option, or FAIL. */
6109 static int
6110 parse_barrier (char **str)
6111 {
6112 char *p, *q;
6113 const struct asm_barrier_opt *o;
6114
6115 p = q = *str;
6116 while (ISALPHA (*q))
6117 q++;
6118
6119 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6120 q - p);
6121 if (!o)
6122 return FAIL;
6123
6124 if (!mark_feature_used (&o->arch))
6125 return FAIL;
6126
6127 *str = q;
6128 return o->value;
6129 }
6130
6131 /* Parse the operands of a table branch instruction. Similar to a memory
6132 operand. */
6133 static int
6134 parse_tb (char **str)
6135 {
6136 char * p = *str;
6137 int reg;
6138
6139 if (skip_past_char (&p, '[') == FAIL)
6140 {
6141 inst.error = _("'[' expected");
6142 return FAIL;
6143 }
6144
6145 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6146 {
6147 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6148 return FAIL;
6149 }
6150 inst.operands[0].reg = reg;
6151
6152 if (skip_past_comma (&p) == FAIL)
6153 {
6154 inst.error = _("',' expected");
6155 return FAIL;
6156 }
6157
6158 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6159 {
6160 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6161 return FAIL;
6162 }
6163 inst.operands[0].imm = reg;
6164
6165 if (skip_past_comma (&p) == SUCCESS)
6166 {
6167 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6168 return FAIL;
6169 if (inst.reloc.exp.X_add_number != 1)
6170 {
6171 inst.error = _("invalid shift");
6172 return FAIL;
6173 }
6174 inst.operands[0].shifted = 1;
6175 }
6176
6177 if (skip_past_char (&p, ']') == FAIL)
6178 {
6179 inst.error = _("']' expected");
6180 return FAIL;
6181 }
6182 *str = p;
6183 return SUCCESS;
6184 }
6185
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL. */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I is the running operand cursor; it is written back through
     WHICH_OPERAND on success so the caller continues after the last
     operand filled in here.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* Try the first operand as a D-register scalar (Dn[x]) first.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  /* Otherwise try a single/double/quad vector register.  */
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only). */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      /* Second operand: core register?  */
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D-register destination: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      /* Second operand: another vector register?  */
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      /* Second operand: a floating-point immediate?  */
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm> */
	inst.operands[i].immisfloat = 1;
      /* Second operand: a (possibly 64-bit) integer immediate?  */
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  /* First operand is a core register.  */
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7. */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13. */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args. */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Shared error exits.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6408
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code is kept in the
   low 16 bits and the Thumb code in the high 16 bits; parse_operands
   selects the relevant half at parse time.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6413
/* Matcher codes for parse_operands.  Each code describes one operand
   grammar element; parse_operands walks an array of these.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero. */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero. */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero. */
  OP_RR_RNSC,	/* ARM reg or Neon scalar. */
  OP_RNSDQ_RNSC,	/* Vector S, D or Q reg, or Neon scalar. */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar. */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar. */
  OP_VMOV,	/* Neon VMOV operands. */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN. */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift. */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/* 0 .. 15 */
  OP_I16,	/* 1 .. 16 */
  OP_I16z,	/* 0 .. 16 */
  OP_I31,	/* 0 .. 31 */
  OP_I31w,	/* 0 .. 31, optional trailing ! */
  OP_I32,	/* 1 .. 32 */
  OP_I32z,	/* 0 .. 32 */
  OP_I63,	/* 0 .. 63 */
  OP_I63s,	/* -64 .. 63 */
  OP_I64,	/* 1 .. 64 */
  OP_I64z,	/* 0 .. 64 */
  OP_I255,	/* 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/* 0 .. 7 */
  OP_I15b,	/* 0 .. 15 */
  OP_I31b,	/* 0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc. */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing). */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading). */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch. */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv". */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands. */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/* 0 .. 31 */
  OP_oI32b,	/* 1 .. 32 */
  OP_oI32z,	/* 0 .. 32 */
  OP_oIffffb,	/* 0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction. */

  /* Some pre-defined mixed (ARM/THUMB) operands. */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands treats any code >= OP_FIRST_OPTIONAL as optional
     and records a backtrack point before trying to match it, so all
     OP_o* codes must sort after this value.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6544
6545 /* Generic instruction operand parser. This does no encoding and no
6546 semantic validation; it merely squirrels values away in the inst
6547 structure. Returns SUCCESS or FAIL depending on whether the
6548 specified grammar matched. */
6549 static int
6550 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6551 {
6552 unsigned const int *upat = pattern;
6553 char *backtrack_pos = 0;
6554 const char *backtrack_error = 0;
6555 int i, val = 0, backtrack_index = 0;
6556 enum arm_reg_type rtype;
6557 parse_operand_result result;
6558 unsigned int op_parse_code;
6559
6560 #define po_char_or_fail(chr) \
6561 do \
6562 { \
6563 if (skip_past_char (&str, chr) == FAIL) \
6564 goto bad_args; \
6565 } \
6566 while (0)
6567
6568 #define po_reg_or_fail(regtype) \
6569 do \
6570 { \
6571 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6572 & inst.operands[i].vectype); \
6573 if (val == FAIL) \
6574 { \
6575 first_error (_(reg_expected_msgs[regtype])); \
6576 goto failure; \
6577 } \
6578 inst.operands[i].reg = val; \
6579 inst.operands[i].isreg = 1; \
6580 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6581 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6582 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6583 || rtype == REG_TYPE_VFD \
6584 || rtype == REG_TYPE_NQ); \
6585 } \
6586 while (0)
6587
6588 #define po_reg_or_goto(regtype, label) \
6589 do \
6590 { \
6591 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6592 & inst.operands[i].vectype); \
6593 if (val == FAIL) \
6594 goto label; \
6595 \
6596 inst.operands[i].reg = val; \
6597 inst.operands[i].isreg = 1; \
6598 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6599 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6600 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6601 || rtype == REG_TYPE_VFD \
6602 || rtype == REG_TYPE_NQ); \
6603 } \
6604 while (0)
6605
6606 #define po_imm_or_fail(min, max, popt) \
6607 do \
6608 { \
6609 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6610 goto failure; \
6611 inst.operands[i].imm = val; \
6612 } \
6613 while (0)
6614
6615 #define po_scalar_or_goto(elsz, label) \
6616 do \
6617 { \
6618 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6619 if (val == FAIL) \
6620 goto label; \
6621 inst.operands[i].reg = val; \
6622 inst.operands[i].isscalar = 1; \
6623 } \
6624 while (0)
6625
6626 #define po_misc_or_fail(expr) \
6627 do \
6628 { \
6629 if (expr) \
6630 goto failure; \
6631 } \
6632 while (0)
6633
6634 #define po_misc_or_fail_no_backtrack(expr) \
6635 do \
6636 { \
6637 result = expr; \
6638 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6639 backtrack_pos = 0; \
6640 if (result != PARSE_OPERAND_SUCCESS) \
6641 goto failure; \
6642 } \
6643 while (0)
6644
6645 #define po_barrier_or_imm(str) \
6646 do \
6647 { \
6648 val = parse_barrier (&str); \
6649 if (val == FAIL && ! ISALPHA (*str)) \
6650 goto immediate; \
6651 if (val == FAIL \
6652 /* ISB can only take SY as an option. */ \
6653 || ((inst.instruction & 0xf0) == 0x60 \
6654 && val != 0xf)) \
6655 { \
6656 inst.error = _("invalid barrier type"); \
6657 backtrack_pos = 0; \
6658 goto failure; \
6659 } \
6660 } \
6661 while (0)
6662
6663 skip_whitespace (str);
6664
6665 for (i = 0; upat[i] != OP_stop; i++)
6666 {
6667 op_parse_code = upat[i];
6668 if (op_parse_code >= 1<<16)
6669 op_parse_code = thumb ? (op_parse_code >> 16)
6670 : (op_parse_code & ((1<<16)-1));
6671
6672 if (op_parse_code >= OP_FIRST_OPTIONAL)
6673 {
6674 /* Remember where we are in case we need to backtrack. */
6675 gas_assert (!backtrack_pos);
6676 backtrack_pos = str;
6677 backtrack_error = inst.error;
6678 backtrack_index = i;
6679 }
6680
6681 if (i > 0 && (i > 1 || inst.operands[0].present))
6682 po_char_or_fail (',');
6683
6684 switch (op_parse_code)
6685 {
6686 /* Registers */
6687 case OP_oRRnpc:
6688 case OP_oRRnpcsp:
6689 case OP_RRnpc:
6690 case OP_RRnpcsp:
6691 case OP_oRR:
6692 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6693 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6694 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6695 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6696 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6697 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6698 case OP_oRND:
6699 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6700 case OP_RVC:
6701 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6702 break;
6703 /* Also accept generic coprocessor regs for unknown registers. */
6704 coproc_reg:
6705 po_reg_or_fail (REG_TYPE_CN);
6706 break;
6707 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6708 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6709 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6710 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6711 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6712 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6713 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6714 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6715 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6716 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6717 case OP_oRNQ:
6718 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6719 case OP_oRNDQ:
6720 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6721 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6722 case OP_oRNSDQ:
6723 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6724
6725 /* Neon scalar. Using an element size of 8 means that some invalid
6726 scalars are accepted here, so deal with those in later code. */
6727 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6728
6729 case OP_RNDQ_I0:
6730 {
6731 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6732 break;
6733 try_imm0:
6734 po_imm_or_fail (0, 0, TRUE);
6735 }
6736 break;
6737
6738 case OP_RVSD_I0:
6739 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6740 break;
6741
6742 case OP_RSVD_FI0:
6743 {
6744 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6745 break;
6746 try_ifimm0:
6747 if (parse_ifimm_zero (&str))
6748 inst.operands[i].imm = 0;
6749 else
6750 {
6751 inst.error
6752 = _("only floating point zero is allowed as immediate value");
6753 goto failure;
6754 }
6755 }
6756 break;
6757
6758 case OP_RR_RNSC:
6759 {
6760 po_scalar_or_goto (8, try_rr);
6761 break;
6762 try_rr:
6763 po_reg_or_fail (REG_TYPE_RN);
6764 }
6765 break;
6766
6767 case OP_RNSDQ_RNSC:
6768 {
6769 po_scalar_or_goto (8, try_nsdq);
6770 break;
6771 try_nsdq:
6772 po_reg_or_fail (REG_TYPE_NSDQ);
6773 }
6774 break;
6775
6776 case OP_RNDQ_RNSC:
6777 {
6778 po_scalar_or_goto (8, try_ndq);
6779 break;
6780 try_ndq:
6781 po_reg_or_fail (REG_TYPE_NDQ);
6782 }
6783 break;
6784
6785 case OP_RND_RNSC:
6786 {
6787 po_scalar_or_goto (8, try_vfd);
6788 break;
6789 try_vfd:
6790 po_reg_or_fail (REG_TYPE_VFD);
6791 }
6792 break;
6793
6794 case OP_VMOV:
6795 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6796 not careful then bad things might happen. */
6797 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6798 break;
6799
6800 case OP_RNDQ_Ibig:
6801 {
6802 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6803 break;
6804 try_immbig:
6805 /* There's a possibility of getting a 64-bit immediate here, so
6806 we need special handling. */
6807 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6808 == FAIL)
6809 {
6810 inst.error = _("immediate value is out of range");
6811 goto failure;
6812 }
6813 }
6814 break;
6815
6816 case OP_RNDQ_I63b:
6817 {
6818 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6819 break;
6820 try_shimm:
6821 po_imm_or_fail (0, 63, TRUE);
6822 }
6823 break;
6824
6825 case OP_RRnpcb:
6826 po_char_or_fail ('[');
6827 po_reg_or_fail (REG_TYPE_RN);
6828 po_char_or_fail (']');
6829 break;
6830
6831 case OP_RRnpctw:
6832 case OP_RRw:
6833 case OP_oRRw:
6834 po_reg_or_fail (REG_TYPE_RN);
6835 if (skip_past_char (&str, '!') == SUCCESS)
6836 inst.operands[i].writeback = 1;
6837 break;
6838
6839 /* Immediates */
6840 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6841 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6842 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6843 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6844 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6845 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6846 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6847 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6848 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6849 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6850 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6851 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6852
6853 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6854 case OP_oI7b:
6855 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6856 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6857 case OP_oI31b:
6858 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6859 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6860 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6861 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6862
6863 /* Immediate variants */
6864 case OP_oI255c:
6865 po_char_or_fail ('{');
6866 po_imm_or_fail (0, 255, TRUE);
6867 po_char_or_fail ('}');
6868 break;
6869
6870 case OP_I31w:
6871 /* The expression parser chokes on a trailing !, so we have
6872 to find it first and zap it. */
6873 {
6874 char *s = str;
6875 while (*s && *s != ',')
6876 s++;
6877 if (s[-1] == '!')
6878 {
6879 s[-1] = '\0';
6880 inst.operands[i].writeback = 1;
6881 }
6882 po_imm_or_fail (0, 31, TRUE);
6883 if (str == s - 1)
6884 str = s;
6885 }
6886 break;
6887
6888 /* Expressions */
6889 case OP_EXPi: EXPi:
6890 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6891 GE_OPT_PREFIX));
6892 break;
6893
6894 case OP_EXP:
6895 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6896 GE_NO_PREFIX));
6897 break;
6898
6899 case OP_EXPr: EXPr:
6900 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6901 GE_NO_PREFIX));
6902 if (inst.reloc.exp.X_op == O_symbol)
6903 {
6904 val = parse_reloc (&str);
6905 if (val == -1)
6906 {
6907 inst.error = _("unrecognized relocation suffix");
6908 goto failure;
6909 }
6910 else if (val != BFD_RELOC_UNUSED)
6911 {
6912 inst.operands[i].imm = val;
6913 inst.operands[i].hasreloc = 1;
6914 }
6915 }
6916 break;
6917
6918 /* Operand for MOVW or MOVT. */
6919 case OP_HALF:
6920 po_misc_or_fail (parse_half (&str));
6921 break;
6922
6923 /* Register or expression. */
6924 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6925 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6926
6927 /* Register or immediate. */
6928 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6929 I0: po_imm_or_fail (0, 0, FALSE); break;
6930
6931 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6932 IF:
6933 if (!is_immediate_prefix (*str))
6934 goto bad_args;
6935 str++;
6936 val = parse_fpa_immediate (&str);
6937 if (val == FAIL)
6938 goto failure;
6939 /* FPA immediates are encoded as registers 8-15.
6940 parse_fpa_immediate has already applied the offset. */
6941 inst.operands[i].reg = val;
6942 inst.operands[i].isreg = 1;
6943 break;
6944
6945 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6946 I32z: po_imm_or_fail (0, 32, FALSE); break;
6947
6948 /* Two kinds of register. */
6949 case OP_RIWR_RIWC:
6950 {
6951 struct reg_entry *rege = arm_reg_parse_multi (&str);
6952 if (!rege
6953 || (rege->type != REG_TYPE_MMXWR
6954 && rege->type != REG_TYPE_MMXWC
6955 && rege->type != REG_TYPE_MMXWCG))
6956 {
6957 inst.error = _("iWMMXt data or control register expected");
6958 goto failure;
6959 }
6960 inst.operands[i].reg = rege->number;
6961 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6962 }
6963 break;
6964
6965 case OP_RIWC_RIWG:
6966 {
6967 struct reg_entry *rege = arm_reg_parse_multi (&str);
6968 if (!rege
6969 || (rege->type != REG_TYPE_MMXWC
6970 && rege->type != REG_TYPE_MMXWCG))
6971 {
6972 inst.error = _("iWMMXt control register expected");
6973 goto failure;
6974 }
6975 inst.operands[i].reg = rege->number;
6976 inst.operands[i].isreg = 1;
6977 }
6978 break;
6979
6980 /* Misc */
6981 case OP_CPSF: val = parse_cps_flags (&str); break;
6982 case OP_ENDI: val = parse_endian_specifier (&str); break;
6983 case OP_oROR: val = parse_ror (&str); break;
6984 case OP_COND: val = parse_cond (&str); break;
6985 case OP_oBARRIER_I15:
6986 po_barrier_or_imm (str); break;
6987 immediate:
6988 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6989 goto failure;
6990 break;
6991
6992 case OP_wPSR:
6993 case OP_rPSR:
6994 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6995 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6996 {
6997 inst.error = _("Banked registers are not available with this "
6998 "architecture.");
6999 goto failure;
7000 }
7001 break;
7002 try_psr:
7003 val = parse_psr (&str, op_parse_code == OP_wPSR);
7004 break;
7005
7006 case OP_APSR_RR:
7007 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7008 break;
7009 try_apsr:
7010 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7011 instruction). */
7012 if (strncasecmp (str, "APSR_", 5) == 0)
7013 {
7014 unsigned found = 0;
7015 str += 5;
7016 while (found < 15)
7017 switch (*str++)
7018 {
7019 case 'c': found = (found & 1) ? 16 : found | 1; break;
7020 case 'n': found = (found & 2) ? 16 : found | 2; break;
7021 case 'z': found = (found & 4) ? 16 : found | 4; break;
7022 case 'v': found = (found & 8) ? 16 : found | 8; break;
7023 default: found = 16;
7024 }
7025 if (found != 15)
7026 goto failure;
7027 inst.operands[i].isvec = 1;
7028 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7029 inst.operands[i].reg = REG_PC;
7030 }
7031 else
7032 goto failure;
7033 break;
7034
7035 case OP_TB:
7036 po_misc_or_fail (parse_tb (&str));
7037 break;
7038
7039 /* Register lists. */
7040 case OP_REGLST:
7041 val = parse_reg_list (&str);
7042 if (*str == '^')
7043 {
7044 inst.operands[i].writeback = 1;
7045 str++;
7046 }
7047 break;
7048
7049 case OP_VRSLST:
7050 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7051 break;
7052
7053 case OP_VRDLST:
7054 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7055 break;
7056
7057 case OP_VRSDLST:
7058 /* Allow Q registers too. */
7059 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7060 REGLIST_NEON_D);
7061 if (val == FAIL)
7062 {
7063 inst.error = NULL;
7064 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7065 REGLIST_VFP_S);
7066 inst.operands[i].issingle = 1;
7067 }
7068 break;
7069
7070 case OP_NRDLST:
7071 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7072 REGLIST_NEON_D);
7073 break;
7074
7075 case OP_NSTRLST:
7076 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7077 &inst.operands[i].vectype);
7078 break;
7079
7080 /* Addressing modes */
7081 case OP_ADDR:
7082 po_misc_or_fail (parse_address (&str, i));
7083 break;
7084
7085 case OP_ADDRGLDR:
7086 po_misc_or_fail_no_backtrack (
7087 parse_address_group_reloc (&str, i, GROUP_LDR));
7088 break;
7089
7090 case OP_ADDRGLDRS:
7091 po_misc_or_fail_no_backtrack (
7092 parse_address_group_reloc (&str, i, GROUP_LDRS));
7093 break;
7094
7095 case OP_ADDRGLDC:
7096 po_misc_or_fail_no_backtrack (
7097 parse_address_group_reloc (&str, i, GROUP_LDC));
7098 break;
7099
7100 case OP_SH:
7101 po_misc_or_fail (parse_shifter_operand (&str, i));
7102 break;
7103
7104 case OP_SHG:
7105 po_misc_or_fail_no_backtrack (
7106 parse_shifter_operand_group_reloc (&str, i));
7107 break;
7108
7109 case OP_oSHll:
7110 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7111 break;
7112
7113 case OP_oSHar:
7114 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7115 break;
7116
7117 case OP_oSHllar:
7118 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7119 break;
7120
7121 default:
7122 as_fatal (_("unhandled operand code %d"), op_parse_code);
7123 }
7124
7125 /* Various value-based sanity checks and shared operations. We
7126 do not signal immediate failures for the register constraints;
7127 this allows a syntax error to take precedence. */
7128 switch (op_parse_code)
7129 {
7130 case OP_oRRnpc:
7131 case OP_RRnpc:
7132 case OP_RRnpcb:
7133 case OP_RRw:
7134 case OP_oRRw:
7135 case OP_RRnpc_I0:
7136 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7137 inst.error = BAD_PC;
7138 break;
7139
7140 case OP_oRRnpcsp:
7141 case OP_RRnpcsp:
7142 if (inst.operands[i].isreg)
7143 {
7144 if (inst.operands[i].reg == REG_PC)
7145 inst.error = BAD_PC;
7146 else if (inst.operands[i].reg == REG_SP)
7147 inst.error = BAD_SP;
7148 }
7149 break;
7150
7151 case OP_RRnpctw:
7152 if (inst.operands[i].isreg
7153 && inst.operands[i].reg == REG_PC
7154 && (inst.operands[i].writeback || thumb))
7155 inst.error = BAD_PC;
7156 break;
7157
7158 case OP_CPSF:
7159 case OP_ENDI:
7160 case OP_oROR:
7161 case OP_wPSR:
7162 case OP_rPSR:
7163 case OP_COND:
7164 case OP_oBARRIER_I15:
7165 case OP_REGLST:
7166 case OP_VRSLST:
7167 case OP_VRDLST:
7168 case OP_VRSDLST:
7169 case OP_NRDLST:
7170 case OP_NSTRLST:
7171 if (val == FAIL)
7172 goto failure;
7173 inst.operands[i].imm = val;
7174 break;
7175
7176 default:
7177 break;
7178 }
7179
7180 /* If we get here, this operand was successfully parsed. */
7181 inst.operands[i].present = 1;
7182 continue;
7183
7184 bad_args:
7185 inst.error = BAD_ARGS;
7186
7187 failure:
7188 if (!backtrack_pos)
7189 {
7190 /* The parse routine should already have set inst.error, but set a
7191 default here just in case. */
7192 if (!inst.error)
7193 inst.error = _("syntax error");
7194 return FAIL;
7195 }
7196
7197 /* Do not backtrack over a trailing optional argument that
7198 absorbed some text. We will only fail again, with the
7199 'garbage following instruction' error message, which is
7200 probably less helpful than the current one. */
7201 if (backtrack_index == i && backtrack_pos != str
7202 && upat[i+1] == OP_stop)
7203 {
7204 if (!inst.error)
7205 inst.error = _("syntax error");
7206 return FAIL;
7207 }
7208
7209 /* Try again, skipping the optional argument at backtrack_pos. */
7210 str = backtrack_pos;
7211 inst.error = backtrack_error;
7212 inst.operands[backtrack_index].present = 0;
7213 i = backtrack_index;
7214 backtrack_pos = 0;
7215 }
7216
7217 /* Check that we have parsed all the arguments. */
7218 if (*str != '\0' && !inst.error)
7219 inst.error = _("garbage following instruction");
7220
7221 return inst.error ? FAIL : SUCCESS;
7222 }
7223
7224 #undef po_char_or_fail
7225 #undef po_reg_or_fail
7226 #undef po_reg_or_goto
7227 #undef po_imm_or_fail
7228 #undef po_scalar_or_fail
7229 #undef po_barrier_or_imm
7230
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if true, records ERR in inst.error and returns from
   the *calling* function, so it may only be used inside functions
   returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Like `constraint', this returns from the calling function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only diagnoses when warn_on_deprecated is set.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7262
7263 /* Functions for operand encoding. ARM, then Thumb. */
7264
/* Rotate the 32-bit value V left by N bits (N taken modulo 32; N == 0
   is handled by the `& 31' on the right-shift count).  Both arguments
   are fully parenthesised so the macro expands correctly for compound
   expressions; note each argument may be evaluated more than once, so
   avoid side effects in V and N.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7266
7267 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7268
7269 The only binary encoding difference is the Coprocessor number. Coprocessor
7270 9 is used for half-precision calculations or conversions. The format of the
7271 instruction is the same as the equivalent Coprocessor 10 instuction that
7272 exists for Single-Precision operation. */
7273
static void
do_scalar_fp16_v82_encode (void)
{
  /* fp16 scalar instructions have no conditional forms; warn (rather
     than error) so the source still assembles.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Reject the instruction outright if the selected CPU lacks the
     ARMv8.2 FP16 extension (constraint returns from this function).  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor field (bits [11:8]) with 9 to select the
     half-precision variant of the single-precision (CP10) encoding.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7286
7287 /* If VAL can be encoded in the immediate field of an ARM instruction,
7288 return the encoded form. Otherwise, return FAIL. */
7289
7290 static unsigned int
7291 encode_arm_immediate (unsigned int val)
7292 {
7293 unsigned int a, i;
7294
7295 if (val <= 0xff)
7296 return val;
7297
7298 for (i = 2; i < 32; i += 2)
7299 if ((a = rotate_left (val, i)) <= 0xff)
7300 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7301
7302 return FAIL;
7303 }
7304
7305 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7306 return the encoded form. Otherwise, return FAIL. */
7307 static unsigned int
7308 encode_thumb32_immediate (unsigned int val)
7309 {
7310 unsigned int a, i;
7311
7312 if (val <= 0xff)
7313 return val;
7314
7315 for (i = 1; i <= 24; i++)
7316 {
7317 a = val >> i;
7318 if ((val & ~(0xff << i)) == 0)
7319 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7320 }
7321
7322 a = val & 0xff;
7323 if (val == ((a << 16) | a))
7324 return 0x100 | a;
7325 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7326 return 0x300 | a;
7327
7328 a = val & 0xff00;
7329 if (val == ((a << 16) | a))
7330 return 0x200 | (a >> 8);
7331
7332 return FAIL;
7333 }
7334 /* Encode a VFP SP or DP register number into inst.instruction. */
7335
static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 require the D32 extension; record its use, or diagnose
     its absence.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* An S register splits its 5-bit number into a 4-bit field plus a
     separate low bit; a D register splits into a 4-bit field plus a
     separate high bit.  The field positions depend on whether the
     register is the d, n or m operand.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7388
7389 /* Encode a <shift> in an ARM-format instruction. The immediate,
7390 if any, is handled by md_apply_fix. */
7391 static void
7392 encode_arm_shift (int i)
7393 {
7394 if (inst.operands[i].shift_kind == SHIFT_RRX)
7395 inst.instruction |= SHIFT_ROR << 5;
7396 else
7397 {
7398 inst.instruction |= inst.operands[i].shift_kind << 5;
7399 if (inst.operands[i].immisreg)
7400 {
7401 inst.instruction |= SHIFT_BY_REG;
7402 inst.instruction |= inst.operands[i].imm << 8;
7403 }
7404 else
7405 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7406 }
7407 }
7408
7409 static void
7410 encode_arm_shifter_operand (int i)
7411 {
7412 if (inst.operands[i].isreg)
7413 {
7414 inst.instruction |= inst.operands[i].reg;
7415 encode_arm_shift (i);
7416 }
7417 else
7418 {
7419 inst.instruction |= INST_IMMEDIATE;
7420 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7421 inst.instruction |= inst.operands[i].imm;
7422 }
7423 }
7424
7425 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* Encode the base register and indexing mode of operand I (set up by
     parse_address) into inst.instruction.  IS_T selects the "T"
     (translated) load/store forms, which only accept post-indexed
     addressing.  On error, sets inst.error and returns.  */

  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only sets postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits [15:12]) is the same as a
     base register (bits [19:16]) that will be written back.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7467
7468 /* inst.operands[i] was set up by parse_address. Encode it into an
7469 ARM-format mode 2 load or store instruction. If is_t is true,
7470 reject forms that cannot be used with a T instruction (i.e. not
7471 post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  The offset
	 register may not be the PC, nor may a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX encodes as ROR with an (implicit) zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7527
7528 /* inst.operands[i] was set up by parse_address. Encode it into an
7529 ARM-format mode 3 load or store instruction. Reject forms that
7530 cannot be used with such instructions. If is_t is true, reject
7531 forms that cannot be used with a T instruction (i.e. not
7532 post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter field, so a scaled register offset can never
     be encoded.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7571
7572 /* Write immediate bits [7:0] to the following locations:
7573
7574 |28/24|23 19|18 16|15 4|3 0|
7575 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7576
7577 This function is used by VMOV/VMVN/VORR/VBIC. */
7578
7579 static void
7580 neon_write_immbits (unsigned immbits)
7581 {
7582 inst.instruction |= immbits & 0xf;
7583 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7584 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7585 }
7586
7587 /* Invert low-order SIZE bits of XHI:XLO. */
7588
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned low_mask;

  /* Pick the mask covering the low-order SIZE bits of the low word.  */
  if (size == 8)
    low_mask = 0xff;
  else if (size == 16)
    low_mask = 0xffff;
  else if (size == 32 || size == 64)
    low_mask = 0xffffffff;
  else
    abort ();

  if (xlo)
    *xlo = ~*xlo & low_mask;

  /* Only a 64-bit invert touches the high word.  */
  if (xhi && size == 64)
    *xhi = ~*xhi & 0xffffffff;
}
7623
7624 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7625 A, B, C, D. */
7626
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each of the four bytes must be homogeneous: all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
7635
7636 /* For immediate of above form, return 0bABCD. */
7637
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  /* Collect bit 0 of each byte into one 4-bit result.  */
  for (byte = 0; byte < 4; byte++)
    if (imm & (1u << (byte * 8)))
      squashed |= 1u << byte;

  return squashed;
}
7644
7645 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7646
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Keep the sign bit (moved down to bit 7) and the seven bits below
     bit 26 (exponent low bits plus top fraction bits).  */
  unsigned sign_bit = (imm >> 24) & 0x80;
  unsigned exp_and_frac = (imm >> 19) & 0x7f;

  return sign_bit | exp_and_frac;
}
7652
7653 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7654 the instruction. *OP is passed as the initial value of the op field, and
7655 may be set to a different value depending on the constant (i.e.
7656 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7657 MVN). If the immediate looks like a repeated pattern then also
7658 try smaller element sizes. */
7659
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* cmode 0xf: 8-bit quarter-precision float, only with 32-bit
     elements and *OP == 0.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xe with OP forced to 1: a 64-bit immediate whose bytes
	 are each all-zeros or all-ones, squashed to one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit value is only encodable when both halves
	 match; fall through and treat it as a 32-bit pattern.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmodes 0x0/0x2/0x4/0x6: one byte at byte position 0..3;
	 cmodes 0xc/0xd: one byte shifted with ones filled in below.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Not directly encodable - try again as two identical 16-bit
	 halves.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmodes 0x8/0xa: one byte at either position of a 16-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Likewise, retry as two identical bytes.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7762
7763 #if defined BFD_HOST_64_BIT
7764 /* Returns TRUE if double precision value V may be cast
7765 to single precision without loss of accuracy. */
7766
7767 static bfd_boolean
7768 is_double_a_single (bfd_int64_t v)
7769 {
7770 int exp = (int)((v >> 52) & 0x7FF);
7771 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7772
7773 return (exp == 0 || exp == 0x7FF
7774 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7775 && (mantissa & 0x1FFFFFFFl) == 0;
7776 }
7777
7778 /* Returns a double precision value casted to single precision
7779 (ignoring the least significant bits in exponent and mantissa). */
7780
7781 static int
7782 double_to_single (bfd_int64_t v)
7783 {
7784 int sign = (int) ((v >> 63) & 1l);
7785 int exp = (int) ((v >> 52) & 0x7FF);
7786 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7787
7788 if (exp == 0x7FF)
7789 exp = 0xFF;
7790 else
7791 {
7792 exp = exp - 1023 + 127;
7793 if (exp >= 0xFF)
7794 {
7795 /* Infinity. */
7796 exp = 0x7F;
7797 mantissa = 0;
7798 }
7799 else if (exp < 0)
7800 {
7801 /* No denormalized numbers. */
7802 exp = 0;
7803 mantissa = 0;
7804 }
7805 }
7806 mantissa >>= 29;
7807 return (sign << 31) | (exp << 23) | mantissa;
7808 }
7809 #endif /* BFD_HOST_64_BIT */
7810
/* Kind of literal an "=expr" pseudo-load targets; selects which
   move/load family move_or_literal_pool tries to synthesise.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb LDR rX, =const.  */
  CONST_ARM,	/* ARM LDR rX, =const.  */
  CONST_VEC	/* VFP/Neon VLDR, =const.  */
};
7817
7818 static void do_vfp_nsyn_opcode (const char *);
7819
7820 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7821 Determine whether it can be performed with a move instruction; if
7822 it can, convert inst.instruction to that move instruction and
7823 return TRUE; if it can't, convert inst.instruction to a literal-pool
7824 load and return FALSE. If this is not a valid thing to do in the
7825 current context, set inst.error and return TRUE.
7826
7827 inst.operands[i] describes the destination register. */
7828
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Pick the load bit for the instruction set in use; =expr is only
     meaningful on a load.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      /* Materialise the constant value V from either the bignum parts
	 or the plain add-number.  */
      if (inst.reloc.exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try V directly first; if that fails, try its
		     complement (loadable via MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC)
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* Sign-extend IMMLO into the high word unless the
		 expression is unsigned or a register pair was given.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      /* If the value itself is not encodable, try its inverse
		 with the opposite op (VMOV <-> VMVN).  */
	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form worked: fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8068
8069 /* inst.operands[i] was set up by parse_address. Encode it into an
8070 ARM-format instruction. Reject all forms which cannot be encoded
8071 into a coprocessor load/store instruction. If wb_ok is false,
8072 reject use of writeback; if unind_ok is false, reject use of
8073 unindexed addressing. If reloc_override is not 0, use it instead
8074 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8075 (in which case it is preserved). */
8076
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand means a literal: try to turn it into a
     PC-relative literal-pool reference (or an immediate vmov).  */
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn goes in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: 8-bit option field in bits 7:0, U bit set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the relocation: an explicit override wins, but group
     relocations already selected by the parser are preserved.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8145
8146 /* Functions for instruction encoding, sorted by sub-architecture.
8147 First some generics; their names are taken from the conventional
8148 bit positions for register arguments in ARM format instructions. */
8149
static void
do_noargs (void)
{
  /* Nothing to encode beyond the opcode itself.  */
}
8154
static void
do_rd (void)
{
  /* Rd in bits 15:12.  */
  inst.instruction |= inst.operands[0].reg << 12;
}
8160
8161 static void
8162 do_rd_rm (void)
8163 {
8164 inst.instruction |= inst.operands[0].reg << 12;
8165 inst.instruction |= inst.operands[1].reg;
8166 }
8167
8168 static void
8169 do_rm_rn (void)
8170 {
8171 inst.instruction |= inst.operands[0].reg;
8172 inst.instruction |= inst.operands[1].reg << 16;
8173 }
8174
8175 static void
8176 do_rd_rn (void)
8177 {
8178 inst.instruction |= inst.operands[0].reg << 12;
8179 inst.instruction |= inst.operands[1].reg << 16;
8180 }
8181
8182 static void
8183 do_rn_rd (void)
8184 {
8185 inst.instruction |= inst.operands[0].reg << 16;
8186 inst.instruction |= inst.operands[1].reg << 12;
8187 }
8188
8189 static void
8190 do_tt (void)
8191 {
8192 inst.instruction |= inst.operands[0].reg << 8;
8193 inst.instruction |= inst.operands[1].reg << 16;
8194 }
8195
8196 static bfd_boolean
8197 check_obsolete (const arm_feature_set *feature, const char *msg)
8198 {
8199 if (ARM_CPU_IS_ANY (cpu_variant))
8200 {
8201 as_tsktsk ("%s", msg);
8202 return TRUE;
8203 }
8204 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8205 {
8206 as_bad ("%s", msg);
8207 return TRUE;
8208 }
8209
8210 return FALSE;
8211 }
8212
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd in bits 15:12, Rm in bits 3:0, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8236
8237 static void
8238 do_rd_rn_rm (void)
8239 {
8240 inst.instruction |= inst.operands[0].reg << 12;
8241 inst.instruction |= inst.operands[1].reg << 16;
8242 inst.instruction |= inst.operands[2].reg;
8243 }
8244
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Only a plain [Rn] address (zero offset) is acceptable here.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Rm in bits 3:0, Rd in bits 15:12, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8257
static void
do_imm0 (void)
{
  /* Immediate operand ORed directly into the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
8263
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 15:12, followed by a coprocessor address operand
     (writeback and unindexed forms both permitted).  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8270
8271 /* ARM instructions, in alphabetical order by function name (except
8272 that wrapper functions appear immediately after the function they
8273 wrap). */
8274
8275 /* This is a pseudo-op of the form "adr rd, label" to be converted
8276 into a relative address of the form "add rd, pc, #label-.-8". */
8277
8278 static void
8279 do_adr (void)
8280 {
8281 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8282
8283 /* Frag hacking will turn this into a sub instruction if the offset turns
8284 out to be negative. */
8285 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8286 inst.reloc.pc_rel = 1;
8287 inst.reloc.exp.X_add_number -= 8;
8288 }
8289
8290 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8291 into a relative address of the form:
8292 add rd, pc, #low(label-.-8)"
8293 add rd, rd, #high(label-.-8)" */
8294
8295 static void
8296 do_adrl (void)
8297 {
8298 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8299
8300 /* Frag hacking will turn this into a sub instruction if the offset turns
8301 out to be negative. */
8302 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8303 inst.reloc.pc_rel = 1;
8304 inst.size = INSN_SIZE * 2;
8305 inst.reloc.exp.X_add_number -= 8;
8306 }
8307
8308 static void
8309 do_arit (void)
8310 {
8311 if (!inst.operands[1].present)
8312 inst.operands[1].reg = inst.operands[0].reg;
8313 inst.instruction |= inst.operands[0].reg << 12;
8314 inst.instruction |= inst.operands[1].reg << 16;
8315 encode_arm_shifter_operand (2);
8316 }
8317
8318 static void
8319 do_barrier (void)
8320 {
8321 if (inst.operands[0].present)
8322 inst.instruction |= inst.operands[0].imm;
8323 else
8324 inst.instruction |= 0xf;
8325 }
8326
8327 static void
8328 do_bfc (void)
8329 {
8330 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8331 constraint (msb > 32, _("bit-field extends past end of register"));
8332 /* The instruction encoding stores the LSB and MSB,
8333 not the LSB and width. */
8334 inst.instruction |= inst.operands[0].reg << 12;
8335 inst.instruction |= inst.operands[1].imm << 7;
8336 inst.instruction |= (msb - 1) << 16;
8337 }
8338
8339 static void
8340 do_bfi (void)
8341 {
8342 unsigned int msb;
8343
8344 /* #0 in second position is alternative syntax for bfc, which is
8345 the same instruction but with REG_PC in the Rm field. */
8346 if (!inst.operands[1].isreg)
8347 inst.operands[1].reg = REG_PC;
8348
8349 msb = inst.operands[2].imm + inst.operands[3].imm;
8350 constraint (msb > 32, _("bit-field extends past end of register"));
8351 /* The instruction encoding stores the LSB and MSB,
8352 not the LSB and width. */
8353 inst.instruction |= inst.operands[0].reg << 12;
8354 inst.instruction |= inst.operands[1].reg;
8355 inst.instruction |= inst.operands[2].imm << 7;
8356 inst.instruction |= (msb - 1) << 16;
8357 }
8358
8359 static void
8360 do_bfx (void)
8361 {
8362 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8363 _("bit-field extends past end of register"));
8364 inst.instruction |= inst.operands[0].reg << 12;
8365 inst.instruction |= inst.operands[1].reg;
8366 inst.instruction |= inst.operands[2].imm << 7;
8367 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8368 }
8369
8370 /* ARM V5 breakpoint instruction (argument parse)
8371 BKPT <16 bit unsigned immediate>
8372 Instruction is not conditional.
8373 The bit pattern given in insns[] has the COND_ALWAYS condition,
8374 and it is an error if the caller tried to override that. */
8375
8376 static void
8377 do_bkpt (void)
8378 {
8379 /* Top 12 of 16 bits to bits 19:8. */
8380 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8381
8382 /* Bottom 4 of 16 bits to bits 3:0. */
8383 inst.instruction |= inst.operands[0].imm & 0xf;
8384 }
8385
/* Set up a PC-relative branch relocation.  An explicit (plt) or
   (tlscall) suffix on the operand overrides DEFAULT_RELOC.  */

static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* TLS calls use a mode-specific relocation.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8402
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later distinguishes jumps from generic branches so the
     linker can generate interworking veneers.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8413
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL may be converted to BLX by the
	 linker, hence the CALL/JUMP distinction.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8429
8430 /* ARM V5 branch-link-exchange instruction (argument parse)
8431 BLX <target_addr> ie BLX(1)
8432 BLX{<condition>} <Rm> ie BLX(2)
8433 Unfortunately, there are two different opcodes for this mnemonic.
8434 So, the insns[].value is not used, and the code here zaps values
8435 into inst.instruction.
8436 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8437
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Switch to the immediate-form BLX opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8461
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Pre-v4 EABI objects never get the marker relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8485
8486
8487 /* ARM v5TEJ. Jump to Jazelle code. */
8488
8489 static void
8490 do_bxj (void)
8491 {
8492 if (inst.operands[0].reg == REG_PC)
8493 as_tsktsk (_("use of r15 in bxj is not really useful"));
8494
8495 inst.instruction |= inst.operands[0].reg;
8496 }
8497
8498 /* Co-processor data operation:
8499 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8500 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8501 static void
8502 do_cdp (void)
8503 {
8504 inst.instruction |= inst.operands[0].reg << 8;
8505 inst.instruction |= inst.operands[1].imm << 20;
8506 inst.instruction |= inst.operands[2].reg << 12;
8507 inst.instruction |= inst.operands[3].reg << 16;
8508 inst.instruction |= inst.operands[4].reg;
8509 inst.instruction |= inst.operands[5].imm << 5;
8510 }
8511
static void
do_cmp (void)
{
  /* Rn in bits 19:16; the second operand is a shifter operand.  */
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8518
8519 /* Transfer between coprocessor and ARM registers.
8520 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8521 MRC2
8522 MCR{cond}
8523 MCR2
8524
8525 No special properties. */
8526
/* Identifies one coprocessor register (by its full mcr/mrc coordinates)
   whose access is deprecated and/or obsoleted on some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* First opcode field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Second opcode field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsolete access.  */
};
8539
8540 #define DEPR_ACCESS_V8 \
8541 N_("This coprocessor register access is deprecated in ARMv8")
8542
8543 /* Table of all deprecated coprocessor registers. */
8544 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8545 {
8546 {15, 0, 7, 10, 5, /* CP15DMB. */
8547 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8548 DEPR_ACCESS_V8, NULL},
8549 {15, 0, 7, 10, 4, /* CP15DSB. */
8550 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8551 DEPR_ACCESS_V8, NULL},
8552 {15, 0, 7, 5, 4, /* CP15ISB. */
8553 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8554 DEPR_ACCESS_V8, NULL},
8555 {14, 6, 1, 0, 0, /* TEEHBR. */
8556 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8557 DEPR_ACCESS_V8, NULL},
8558 {14, 6, 0, 0, 0, /* TEECR. */
8559 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8560 DEPR_ACCESS_V8, NULL},
8561 };
8562
8563 #undef DEPR_ACCESS_V8
8564
8565 static const size_t deprecated_coproc_reg_count =
8566 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8567
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  /* Register restrictions differ between MCR (write to coproc) and
     MRC (read from coproc), and between ARM and Thumb states.  */
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2	*/
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2	*/
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if this access hits an entry in the deprecated-register
     table (exact match on all five coordinates).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc in 11:8, opcode_1 in 23:21, Rd in 15:12, CRn in 19:16,
     CRm in 3:0, opcode_2 in 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8617
8618 /* Transfer between coprocessor register and pair of ARM registers.
8619 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8620 MCRR2
8621 MRRC{cond}
8622 MRRC2
8623
8624 Two XScale instructions are special cases of these:
8625
8626 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8627 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8628
8629 Result unpredictable if Rd or Rn is R15. */
8630
8631 static void
8632 do_co_reg2c (void)
8633 {
8634 unsigned Rd, Rn;
8635
8636 Rd = inst.operands[2].reg;
8637 Rn = inst.operands[3].reg;
8638
8639 if (thumb_mode)
8640 {
8641 reject_bad_reg (Rd);
8642 reject_bad_reg (Rn);
8643 }
8644 else
8645 {
8646 constraint (Rd == REG_PC, BAD_PC);
8647 constraint (Rn == REG_PC, BAD_PC);
8648 }
8649
8650 inst.instruction |= inst.operands[0].reg << 8;
8651 inst.instruction |= inst.operands[1].imm << 4;
8652 inst.instruction |= Rd << 12;
8653 inst.instruction |= Rn << 16;
8654 inst.instruction |= inst.operands[4].reg;
8655 }
8656
8657 static void
8658 do_cpsi (void)
8659 {
8660 inst.instruction |= inst.operands[0].imm << 6;
8661 if (inst.operands[1].present)
8662 {
8663 inst.instruction |= CPSI_MMOD;
8664 inst.instruction |= inst.operands[1].imm;
8665 }
8666 }
8667
static void
do_dbg (void)
{
  /* 4-bit hint option in bits 3:0.  */
  inst.instruction |= inst.operands[0].imm;
}
8673
8674 static void
8675 do_div (void)
8676 {
8677 unsigned Rd, Rn, Rm;
8678
8679 Rd = inst.operands[0].reg;
8680 Rn = (inst.operands[1].present
8681 ? inst.operands[1].reg : Rd);
8682 Rm = inst.operands[2].reg;
8683
8684 constraint ((Rd == REG_PC), BAD_PC);
8685 constraint ((Rn == REG_PC), BAD_PC);
8686 constraint ((Rm == REG_PC), BAD_PC);
8687
8688 inst.instruction |= Rd << 16;
8689 inst.instruction |= Rn << 0;
8690 inst.instruction |= Rm << 8;
8691 }
8692
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM mode.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition so following instructions
	 can be checked against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8709
/* If there is only one register in the register list RANGE (a bitmask
   of registers r0..r15), then return its register number.  Otherwise
   return -1.  An empty list also yields -1; checking I < 0 first
   avoids the undefined negative shift that (1 << -1) would be when
   RANGE is zero (ffs returns 0 in that case).  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8718
/* Encode an LDM/STM (or PUSH/POP when FROM_PUSH_POP_MNEM) instruction,
   diagnosing unpredictable writeback combinations and switching
   single-register PUSH/POP to the A2 (str/ldr) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects user-mode registers /
     SPSR restore (LDM/STM type 2 or 3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition bits and rebuild as str/ldr.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8769
static void
do_ldmstm (void)
{
  /* Plain ldm/stm mnemonic: no PUSH/POP A2 rewriting.  */
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8775
8776 /* ARMv5TE load-consecutive (argument parse)
8777 Mode is like LDRH.
8778
8779 LDRccD R, mode
8780 STRccD R, mode. */
8781
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* An even first register of r14 would imply r15 as the pair.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to first + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8817
static void
do_ldrex (void)
{
  /* Only a plain [Rn] address is valid: no index, writeback, shift,
     or negative offset.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): REG_PC was already rejected by the constraint above
     (with BAD_ADDR_MODE), so this check appears unreachable.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8849
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* Rt in bits 15:12, Rn in bits 19:16; Rt2 is implicitly Rt + 1.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8865
8866 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8867 which is not a multiple of four is UNPREDICTABLE. */
8868 static void
8869 check_ldr_r15_aligned (void)
8870 {
8871 constraint (!(inst.operands[1].immisreg)
8872 && (inst.operands[0].reg == REG_PC
8873 && inst.operands[1].reg == REG_PC
8874 && (inst.reloc.exp.X_add_number & 0x3)),
8875 _("ldr to register 15 must be 4-byte alligned"));
8876 }
8877
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare expression becomes a literal-pool load (or a mov when the
     constant can be encoded directly).  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8888
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite the pre-indexed zero-offset form as post-indexed
	 with writeback, which is what the hardware does.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8907
8908 /* Halfword and signed-byte load/store operations. */
8909
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* A bare expression becomes a literal-pool load (mode 3 encoding
     for the halfword/signed-byte forms).  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8920
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      /* Rewrite the pre-indexed zero-offset form as post-indexed
	 with writeback, as in do_ldstt above but for mode 3.  */
      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8939
8940 /* Co-processor register load/store.
8941 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8942 static void
8943 do_lstc (void)
8944 {
8945 inst.instruction |= inst.operands[0].reg << 8;
8946 inst.instruction |= inst.operands[1].reg << 12;
8947 encode_arm_cp_address (2, TRUE, TRUE, 0);
8948 }
8949
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Rd in 19:16, Rm in 3:0, Rs in 11:8, Rn in 15:12.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8964
static void
do_mov (void)
{
  /* Rd in bits 15:12; the source is a shifter operand.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
8971
8972 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (top) from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* Only encode the immediate inline when no relocation is pending;
     otherwise the fixup machinery fills it in later.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
8993
/* Handle mrs written with VFP operands by mapping it onto the
   old-syntax fmstat/fmrx opcodes.  Returns SUCCESS if handled,
   FAIL if this is a plain core-register mrs.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9012
9013 static int
9014 do_vfp_nsyn_msr (void)
9015 {
9016 if (inst.operands[0].isvec)
9017 do_vfp_nsyn_opcode ("fmxr");
9018 else
9019 return FAIL;
9020
9021 return SUCCESS;
9022 }
9023
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is never a valid destination in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9047
9048 static void
9049 do_vmsr (void)
9050 {
9051 unsigned Rt = inst.operands[1].reg;
9052
9053 if (thumb_mode)
9054 reject_bad_reg (Rt);
9055 else if (Rt == REG_PC)
9056 {
9057 inst.error = BAD_PC;
9058 return;
9059 }
9060
9061 /* If we get through parsing the register name, we just insert the number
9062 generated into the instruction without further validation. */
9063 inst.instruction |= (inst.operands[0].reg << 16);
9064 inst.instruction |= (Rt << 12);
9065 }
9066
static void
do_mrs (void)
{
  unsigned br;

  /* VFP system registers are handled by the fmstat/fmrx path.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* A banked register: validate the encoding produced by the parser.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9095
9096 /* Two possible forms:
9097 "{C|S}PSR_<field>, Rm",
9098 "{C|S}PSR_f, #expression". */
9099
static void
do_msr (void)
{
  /* VFP system registers are handled by the fmxr path.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The parsed field mask already carries the PSR field bits.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer encoding to the fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9116
9117 static void
9118 do_mul (void)
9119 {
9120 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9121
9122 if (!inst.operands[2].present)
9123 inst.operands[2].reg = inst.operands[0].reg;
9124 inst.instruction |= inst.operands[0].reg << 16;
9125 inst.instruction |= inst.operands[1].reg;
9126 inst.instruction |= inst.operands[2].reg << 8;
9127
9128 if (inst.operands[0].reg == inst.operands[1].reg
9129 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9130 as_tsktsk (_("Rd and Rm should be different in mul"));
9131 }
9132
9133 /* Long Multiply Parser
9134 UMULL RdLo, RdHi, Rm, Rs
9135 SMULL RdLo, RdHi, Rm, Rs
9136 UMLAL RdLo, RdHi, Rm, Rs
9137 SMLAL RdLo, RdHi, Rm, Rs. */
9138
9139 static void
9140 do_mull (void)
9141 {
9142 inst.instruction |= inst.operands[0].reg << 12;
9143 inst.instruction |= inst.operands[1].reg << 16;
9144 inst.instruction |= inst.operands[2].reg;
9145 inst.instruction |= inst.operands[3].reg << 8;
9146
9147 /* rdhi and rdlo must be different. */
9148 if (inst.operands[0].reg == inst.operands[1].reg)
9149 as_tsktsk (_("rdhi and rdlo must be different"));
9150
9151 /* rdhi, rdlo and rm must all be different before armv6. */
9152 if ((inst.operands[0].reg == inst.operands[2].reg
9153 || inst.operands[1].reg == inst.operands[2].reg)
9154 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9155 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9156 }
9157
9158 static void
9159 do_nop (void)
9160 {
9161 if (inst.operands[0].present
9162 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9163 {
9164 /* Architectural NOP hints are CPSR sets with no bits selected. */
9165 inst.instruction &= 0xf0000000;
9166 inst.instruction |= 0x0320f000;
9167 if (inst.operands[0].present)
9168 inst.instruction |= inst.operands[0].imm;
9169 }
9170 }
9171
9172 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9173 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9174 Condition defaults to COND_ALWAYS.
9175 Error if Rd, Rn or Rm are R15. */
9176
9177 static void
9178 do_pkhbt (void)
9179 {
9180 inst.instruction |= inst.operands[0].reg << 12;
9181 inst.instruction |= inst.operands[1].reg << 16;
9182 inst.instruction |= inst.operands[2].reg;
9183 if (inst.operands[3].present)
9184 encode_arm_shift (3);
9185 }
9186
9187 /* ARM V6 PKHTB (Argument Parse). */
9188
9189 static void
9190 do_pkhtb (void)
9191 {
9192 if (!inst.operands[3].present)
9193 {
9194 /* If the shift specifier is omitted, turn the instruction
9195 into pkhbt rd, rm, rn. */
9196 inst.instruction &= 0xfff00010;
9197 inst.instruction |= inst.operands[0].reg << 12;
9198 inst.instruction |= inst.operands[1].reg;
9199 inst.instruction |= inst.operands[2].reg << 16;
9200 }
9201 else
9202 {
9203 inst.instruction |= inst.operands[0].reg << 12;
9204 inst.instruction |= inst.operands[1].reg << 16;
9205 inst.instruction |= inst.operands[2].reg;
9206 encode_arm_shift (3);
9207 }
9208 }
9209
9210 /* ARMv5TE: Preload-Cache
9211 MP Extensions: Preload for write
9212
9213 PLD(W) <addr_mode>
9214
9215 Syntactically, like LDR with B=1, W=0, L=1. */
9216
static void
do_pld (void)
{
  /* PLD{W} accepts only a register-based, pre-indexed address with no
     writeback.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  /* Encode the address exactly like LDR (addressing mode 2).  */
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9230
9231 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* PLI accepts only a register-based, pre-indexed address with no
     writeback — same constraints as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding keeps the P bit clear; undo what the addressing-mode
     encoder set.  */
  inst.instruction &= ~PRE_INDEX;
}
9246
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Rewrite PUSH/POP {reglist} as the equivalent LDM/STM sp!, {reglist}:
     move the register list up to operand 1 and synthesise "sp!" as
     operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9259
9260 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9261 word at the specified address and the following word
9262 respectively.
9263 Unconditionally executed.
9264 Error if Rn is R15. */
9265
9266 static void
9267 do_rfe (void)
9268 {
9269 inst.instruction |= inst.operands[0].reg << 16;
9270 if (inst.operands[0].writeback)
9271 inst.instruction |= WRITE_BACK;
9272 }
9273
9274 /* ARM V6 ssat (argument parse). */
9275
9276 static void
9277 do_ssat (void)
9278 {
9279 inst.instruction |= inst.operands[0].reg << 12;
9280 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9281 inst.instruction |= inst.operands[2].reg;
9282
9283 if (inst.operands[3].present)
9284 encode_arm_shift (3);
9285 }
9286
9287 /* ARM V6 usat (argument parse). */
9288
9289 static void
9290 do_usat (void)
9291 {
9292 inst.instruction |= inst.operands[0].reg << 12;
9293 inst.instruction |= inst.operands[1].imm << 16;
9294 inst.instruction |= inst.operands[2].reg;
9295
9296 if (inst.operands[3].present)
9297 encode_arm_shift (3);
9298 }
9299
9300 /* ARM V6 ssat16 (argument parse). */
9301
9302 static void
9303 do_ssat16 (void)
9304 {
9305 inst.instruction |= inst.operands[0].reg << 12;
9306 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9307 inst.instruction |= inst.operands[2].reg;
9308 }
9309
9310 static void
9311 do_usat16 (void)
9312 {
9313 inst.instruction |= inst.operands[0].reg << 12;
9314 inst.instruction |= inst.operands[1].imm << 16;
9315 inst.instruction |= inst.operands[2].reg;
9316 }
9317
9318 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9319 preserving the other bits.
9320
9321 setend <endian_specifier>, where <endian_specifier> is either
9322 BE or LE. */
9323
static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand means BE; bit 9 is the endianness selector.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9334
9335 static void
9336 do_shift (void)
9337 {
9338 unsigned int Rm = (inst.operands[1].present
9339 ? inst.operands[1].reg
9340 : inst.operands[0].reg);
9341
9342 inst.instruction |= inst.operands[0].reg << 12;
9343 inst.instruction |= Rm;
9344 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9345 {
9346 inst.instruction |= inst.operands[2].reg << 8;
9347 inst.instruction |= SHIFT_BY_REG;
9348 /* PR 12854: Error on extraneous shifts. */
9349 constraint (inst.operands[2].shifted,
9350 _("extraneous shift as part of operand to shift insn"));
9351 }
9352 else
9353 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9354 }
9355
static void
do_smc (void)
{
  /* SMC #imm: the immediate is installed later by a dedicated fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9362
static void
do_hvc (void)
{
  /* HVC #imm: the immediate is installed later by a dedicated fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9369
static void
do_swi (void)
{
  /* SWI/SVC #imm: the immediate is installed later by a dedicated fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9376
static void
do_setpan (void)
{
  /* ARM SETPAN #imm: the PAN state bit is encoded at bit 9.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9385
static void
do_t_setpan (void)
{
  /* Thumb SETPAN #imm: the PAN state bit is encoded at bit 3.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9394
9395 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9396 SMLAxy{cond} Rd,Rm,Rs,Rn
9397 SMLAWy{cond} Rd,Rm,Rs,Rn
9398 Error if any register is R15. */
9399
9400 static void
9401 do_smla (void)
9402 {
9403 inst.instruction |= inst.operands[0].reg << 16;
9404 inst.instruction |= inst.operands[1].reg;
9405 inst.instruction |= inst.operands[2].reg << 8;
9406 inst.instruction |= inst.operands[3].reg << 12;
9407 }
9408
9409 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9410 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9411 Error if any register is R15.
9412 Warning if Rdlo == Rdhi. */
9413
9414 static void
9415 do_smlal (void)
9416 {
9417 inst.instruction |= inst.operands[0].reg << 12;
9418 inst.instruction |= inst.operands[1].reg << 16;
9419 inst.instruction |= inst.operands[2].reg;
9420 inst.instruction |= inst.operands[3].reg << 8;
9421
9422 if (inst.operands[0].reg == inst.operands[1].reg)
9423 as_tsktsk (_("rdhi and rdlo must be different"));
9424 }
9425
9426 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9427 SMULxy{cond} Rd,Rm,Rs
9428 Error if any register is R15. */
9429
9430 static void
9431 do_smul (void)
9432 {
9433 inst.instruction |= inst.operands[0].reg << 16;
9434 inst.instruction |= inst.operands[1].reg;
9435 inst.instruction |= inst.operands[2].reg << 8;
9436 }
9437
9438 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9439 the same for both ARM and Thumb-2. */
9440
9441 static void
9442 do_srs (void)
9443 {
9444 int reg;
9445
9446 if (inst.operands[0].present)
9447 {
9448 reg = inst.operands[0].reg;
9449 constraint (reg != REG_SP, _("SRS base register must be r13"));
9450 }
9451 else
9452 reg = REG_SP;
9453
9454 inst.instruction |= reg << 16;
9455 inst.instruction |= inst.operands[1].imm;
9456 if (inst.operands[0].writeback || inst.operands[1].writeback)
9457 inst.instruction |= WRITE_BACK;
9458 }
9459
9460 /* ARM V6 strex (argument parse). */
9461
static void
do_strex (void)
{
  /* The address must be a plain [Rn]: no index, writeback, shift,
     register offset or negative offset, and Rn may not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* The ARM encoding has no offset field at all.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt (value).  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn (address).  */
  inst.reloc.type = BFD_RELOC_UNUSED;	/* Zero offset: no fixup needed.  */
}
9485
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: address must be a plain [Rn].  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9500
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn]: Rt must be even and Rt2 == Rt + 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register may not overlap either value register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt (even half).  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn (address).  */
}
9522
9523 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* ARM STLEX*: the status register must not overlap the value or base
     registers; field placement is the common Rd/Rm/Rn layout.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9532
static void
do_t_stlex (void)
{
  /* Thumb STLEX*: same overlap rule as ARM, but the Thumb field layout
     is Rm/Rd/Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9541
9542 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9543 extends it to 32-bits, and adds the result to a value in another
9544 register. You can specify a rotation by 0, 8, 16, or 24 bits
9545 before extracting the 16-bit value.
9546 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9547 Condition defaults to COND_ALWAYS.
9548 Error if any register uses R15. */
9549
9550 static void
9551 do_sxtah (void)
9552 {
9553 inst.instruction |= inst.operands[0].reg << 12;
9554 inst.instruction |= inst.operands[1].reg << 16;
9555 inst.instruction |= inst.operands[2].reg;
9556 inst.instruction |= inst.operands[3].imm << 10;
9557 }
9558
9559 /* ARM V6 SXTH.
9560
9561 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9562 Condition defaults to COND_ALWAYS.
9563 Error if any register uses R15. */
9564
9565 static void
9566 do_sxth (void)
9567 {
9568 inst.instruction |= inst.operands[0].reg << 12;
9569 inst.instruction |= inst.operands[1].reg;
9570 inst.instruction |= inst.operands[2].imm << 10;
9571 }
9572 \f
9573 /* VFP instructions. In a logical order: SP variant first, monad
9574 before dyad, arithmetic then move then load/store. */
9575
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision one-operand op: Sd, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9582
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision two-operand op: Sd, Sn, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9590
static void
do_vfp_sp_compare_z (void)
{
  /* Single-precision compare against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9596
static void
do_vfp_dp_sp_cvt (void)
{
  /* Convert single to double: Dd, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9603
static void
do_vfp_sp_dp_cvt (void)
{
  /* Convert double to single: Sd, Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9610
static void
do_vfp_reg_from_sp (void)
{
  /* Move single-precision register to core register: Rd (bits 12-15), Sn.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9617
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Move an SP register pair to two core registers; the parser records
     the pair length in operands[2].imm.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rt2.  */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9627
static void
do_vfp_sp_from_reg (void)
{
  /* Move core register to single-precision register: Sn, Rd (bits 12-15).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9634
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Move two core registers to an SP register pair; the parser records
     the pair length in operands[0].imm.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rt2.  */
}
9644
static void
do_vfp_sp_ldst (void)
{
  /* FLDS/FSTS Sd, <address>: coprocessor-style addressing.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9651
static void
do_vfp_dp_ldst (void)
{
  /* FLDD/FSTD Dd, <address>: coprocessor-style addressing.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9658
9659
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for single-precision FLDM/FSTM: base register in
     bits 16-19, first transfer register Sd, register count in the low
     byte.  Only the IA-without-writeback form may omit "!".  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9672
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for double-precision FLDM/FSTM: as vfp_sp_ldstm, but
     the count field holds words (two per D register), plus one extra
     word for the FLDMX/FSTMX variants.  */
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9693
static void
do_vfp_sp_ldstmia (void)
{
  /* Single-precision multiple load/store, increment-after.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9699
static void
do_vfp_sp_ldstmdb (void)
{
  /* Single-precision multiple load/store, decrement-before.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9705
static void
do_vfp_dp_ldstmia (void)
{
  /* Double-precision multiple load/store, increment-after.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9711
static void
do_vfp_dp_ldstmdb (void)
{
  /* Double-precision multiple load/store, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9717
static void
do_vfp_xp_ldstmia (void)
{
  /* Extended (X) multiple load/store, increment-after: transfers one
     extra word per the count adjustment in vfp_dp_ldstm.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9723
static void
do_vfp_xp_ldstmdb (void)
{
  /* Extended (X) multiple load/store, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9729
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision op with Dd, Dm operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9736
static void
do_vfp_dp_rn_rd (void)
{
  /* Double-precision op with Dn, Dd operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9743
static void
do_vfp_dp_rd_rn (void)
{
  /* Double-precision op with Dd, Dn operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9750
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision three-operand op: Dd, Dn, Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9758
static void
do_vfp_dp_rd (void)
{
  /* Double-precision op with only a destination register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9764
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Double-precision op with Dm, Dd, Dn operand order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9772
9773 /* VFPv3 instructions. */
static void
do_vfp_sp_const (void)
{
  /* VFPv3 VMOV.F32 Sd, #imm: the 8-bit encoded constant is split,
     high nibble to bits 16-19 and low nibble to bits 0-3.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9781
static void
do_vfp_dp_const (void)
{
  /* VFPv3 VMOV.F64 Dd, #imm: same immediate split as the SP form.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9789
static void
vfp_conv (int srcsize)
{
  /* Encode the fraction-bits field of the VFPv3 fixed-point VCVT forms.
     The field holds srcsize minus the written immediate, with its low
     bit at insn bit 5 and the remaining bits at 0-3.  */
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9813
static void
do_vfp_sp_conv_16 (void)
{
  /* Fixed-point VCVT, single precision, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9820
static void
do_vfp_dp_conv_16 (void)
{
  /* Fixed-point VCVT, double precision, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9827
static void
do_vfp_sp_conv_32 (void)
{
  /* Fixed-point VCVT, single precision, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9834
static void
do_vfp_dp_conv_32 (void)
{
  /* Fixed-point VCVT, double precision, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9841 \f
9842 /* FPA instructions. Also in a logical order. */
9843
9844 static void
9845 do_fpa_cmp (void)
9846 {
9847 inst.instruction |= inst.operands[0].reg << 16;
9848 inst.instruction |= inst.operands[1].reg;
9849 }
9850
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM Fd, <count>, <address>: the first register goes in
     bits 12+, the 1-4 register count is spread over the CP_T_X/CP_T_Y
     bits (count 4 encodes as both clear).  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each transferred register occupies 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending forms step the address downwards.  */
      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Post-increment with writeback: turn the operand into a
	 post-indexed one so the offset applies after the transfer.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9889 \f
9890 /* iWMMXt instructions: strictly in alphabetical order. */
9891
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TEXTRC require the destination to be written as r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9897
9898 static void
9899 do_iwmmxt_textrc (void)
9900 {
9901 inst.instruction |= inst.operands[0].reg << 12;
9902 inst.instruction |= inst.operands[1].imm;
9903 }
9904
9905 static void
9906 do_iwmmxt_textrm (void)
9907 {
9908 inst.instruction |= inst.operands[0].reg << 12;
9909 inst.instruction |= inst.operands[1].reg << 16;
9910 inst.instruction |= inst.operands[2].imm;
9911 }
9912
9913 static void
9914 do_iwmmxt_tinsr (void)
9915 {
9916 inst.instruction |= inst.operands[0].reg << 16;
9917 inst.instruction |= inst.operands[1].reg << 12;
9918 inst.instruction |= inst.operands[2].imm;
9919 }
9920
9921 static void
9922 do_iwmmxt_tmia (void)
9923 {
9924 inst.instruction |= inst.operands[0].reg << 5;
9925 inst.instruction |= inst.operands[1].reg;
9926 inst.instruction |= inst.operands[2].reg << 12;
9927 }
9928
9929 static void
9930 do_iwmmxt_waligni (void)
9931 {
9932 inst.instruction |= inst.operands[0].reg << 12;
9933 inst.instruction |= inst.operands[1].reg << 16;
9934 inst.instruction |= inst.operands[2].reg;
9935 inst.instruction |= inst.operands[3].imm << 20;
9936 }
9937
9938 static void
9939 do_iwmmxt_wmerge (void)
9940 {
9941 inst.instruction |= inst.operands[0].reg << 12;
9942 inst.instruction |= inst.operands[1].reg << 16;
9943 inst.instruction |= inst.operands[2].reg;
9944 inst.instruction |= inst.operands[3].imm << 21;
9945 }
9946
9947 static void
9948 do_iwmmxt_wmov (void)
9949 {
9950 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9951 inst.instruction |= inst.operands[0].reg << 12;
9952 inst.instruction |= inst.operands[1].reg << 16;
9953 inst.instruction |= inst.operands[1].reg;
9954 }
9955
9956 static void
9957 do_iwmmxt_wldstbh (void)
9958 {
9959 int reloc;
9960 inst.instruction |= inst.operands[0].reg << 12;
9961 if (thumb_mode)
9962 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9963 else
9964 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9965 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9966 }
9967
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form must be unconditional; force the 0xf
	 condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9981
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD wRd, <address>.  iWMMXt2 additionally allows a
     register-offset form, which needs a hand-built unconditional
     encoding; otherwise fall back to the normal coprocessor address.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the opcode: clear the coprocessor-address fields, force
	 the unconditional (0xf) condition, then set P/U/W, the base
	 register, the scale (from the reloc expression) and the index
	 register.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10004
10005 static void
10006 do_iwmmxt_wshufh (void)
10007 {
10008 inst.instruction |= inst.operands[0].reg << 12;
10009 inst.instruction |= inst.operands[1].reg << 16;
10010 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10011 inst.instruction |= (inst.operands[2].imm & 0x0f);
10012 }
10013
10014 static void
10015 do_iwmmxt_wzero (void)
10016 {
10017 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10018 inst.instruction |= inst.operands[0].reg;
10019 inst.instruction |= inst.operands[0].reg << 12;
10020 inst.instruction |= inst.operands[0].reg << 16;
10021 }
10022
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* iWMMXt shifts accept either a register count (wRd, wRn, wRm) or, on
     iWMMXt2 only, a 5-bit immediate; #0 is rewritten into an equivalent
     full-width rotate/move because zero is not encodable directly.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation/size; rewrite by class.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional encoding; the immediate's top bit goes to bit 8.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10072 \f
10073 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10074 operations first, then control, shift, and load/store. */
10075
10076 /* Insns like "foo X,Y,Z". */
10077
10078 static void
10079 do_mav_triple (void)
10080 {
10081 inst.instruction |= inst.operands[0].reg << 16;
10082 inst.instruction |= inst.operands[1].reg;
10083 inst.instruction |= inst.operands[2].reg << 12;
10084 }
10085
10086 /* Insns like "foo W,X,Y,Z".
10087 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10088
10089 static void
10090 do_mav_quad (void)
10091 {
10092 inst.instruction |= inst.operands[0].reg << 5;
10093 inst.instruction |= inst.operands[1].reg << 12;
10094 inst.instruction |= inst.operands[2].reg << 16;
10095 inst.instruction |= inst.operands[3].reg;
10096 }
10097
10098 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit in the opcode; only the source goes in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10104
10105 /* Maverick shift immediate instructions.
10106 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10107 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10108
10109 static void
10110 do_mav_shift (void)
10111 {
10112 int imm = inst.operands[2].imm;
10113
10114 inst.instruction |= inst.operands[0].reg << 12;
10115 inst.instruction |= inst.operands[1].reg << 16;
10116
10117 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10118 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10119 Bit 4 should be 0. */
10120 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10121
10122 inst.instruction |= imm;
10123 }
10124 \f
10125 /* XScale instructions. Also sorted arithmetic before move. */
10126
10127 /* Xscale multiply-accumulate (argument parse)
10128 MIAcc acc0,Rm,Rs
10129 MIAPHcc acc0,Rm,Rs
10130 MIAxycc acc0,Rm,Rs. */
10131
10132 static void
10133 do_xsc_mia (void)
10134 {
10135 inst.instruction |= inst.operands[1].reg;
10136 inst.instruction |= inst.operands[2].reg << 12;
10137 }
10138
10139 /* Xscale move-accumulator-register (argument parse)
10140
10141 MARcc acc0,RdLo,RdHi. */
10142
10143 static void
10144 do_xsc_mar (void)
10145 {
10146 inst.instruction |= inst.operands[1].reg << 12;
10147 inst.instruction |= inst.operands[2].reg << 16;
10148 }
10149
10150 /* Xscale move-register-accumulator (argument parse)
10151
10152 MRAcc RdLo,RdHi,acc0. */
10153
10154 static void
10155 do_xsc_mra (void)
10156 {
10157 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10158 inst.instruction |= inst.operands[0].reg << 12;
10159 inst.instruction |= inst.operands[1].reg << 16;
10160 }
10161 \f
10162 /* Encoding functions relevant only to Thumb. */
10163
10164 /* inst.operands[i] is a shifted-register operand; encode
10165 it into inst.instruction in the format used by Thumb32. */
10166
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing operands only allow immediate shifts.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Canonicalise: a zero shift is LSL #0; LSR/ASR #32 are encoded
	 with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The 5-bit amount is split: bits 4-2 go to insn bits 14-12,
	 bits 1-0 to insn bits 7-6.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10198
10199
10200 /* inst.operands[i] was set up by parse_address. Encode it into a
10201 Thumb32 format load or store instruction. Reject forms that cannot
10202 be used with such instructions. If is_t is true, reject forms that
10203 cannot be used with a T instruction; if is_d is true, reject forms
10204 that cannot be used with a D instruction. If it is a store insn,
10205 reject PC in Rn. */
10206
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #0-3}] only.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No offset fixup is needed for the register form.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed (or plain offset) form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only meaningful for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* D (dual) encodings use different P/W bit positions.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexing implies writeback by construction.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10278
10279 /* Table of Thumb instructions which exist in both 16- and 32-bit
10280 encodings (the latter only in post-V6T2 cores). The index is the
10281 value used in the insns table below. When there is more than one
10282 possible 16-bit encoding for the instruction, this table always
10283 holds variant (1).
10284 Also contains several pseudo-instructions used during relaxation. */
/* Each X() entry pairs a mnemonic code with its 16-bit opcode template
   and its equivalent 32-bit opcode template (ffffffff when no wide
   encoding exists).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Narrow (16-bit) opcode templates, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Wide (32-bit) opcode templates, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10384
10385 /* Thumb instruction encoders, in alphabetical order. */
10386
10387 /* ADDW or SUBW. */
10388
10389 static void
10390 do_t_add_sub_w (void)
10391 {
10392 int Rd, Rn;
10393
10394 Rd = inst.operands[0].reg;
10395 Rn = inst.operands[1].reg;
10396
10397 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10398 is the SP-{plus,minus}-immediate form of the instruction. */
10399 if (Rn == REG_SP)
10400 constraint (Rd == REG_PC, BAD_PC);
10401 else
10402 reject_bad_reg (Rd);
10403
10404 inst.instruction |= (Rn << 16) | (Rd << 8);
10405 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10406 }
10407
10408 /* Parse an add or subtract instruction. We get here with inst.instruction
10409 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10410
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* The flag-setting mnemonics (ADDS/SUBS) get the narrow encoding
	 outside an IT block and the wide one inside it; the non-flag
	 forms are the other way around.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Keep any ALU group relocation already requested;
		     otherwise fall back to the generic Thumb ADD reloc.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) encoding.  */
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     is permitted with a PC destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source
			 lands in Rn.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (traditional Thumb-1) syntax.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10620
10621 static void
10622 do_t_adr (void)
10623 {
10624 unsigned Rd;
10625
10626 Rd = inst.operands[0].reg;
10627 reject_bad_reg (Rd);
10628
10629 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10630 {
10631 /* Defer to section relaxation. */
10632 inst.relax = inst.instruction;
10633 inst.instruction = THUMB_OP16 (inst.instruction);
10634 inst.instruction |= Rd << 4;
10635 }
10636 else if (unified_syntax && inst.size_req != 2)
10637 {
10638 /* Generate a 32-bit opcode. */
10639 inst.instruction = THUMB_OP32 (inst.instruction);
10640 inst.instruction |= Rd << 8;
10641 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10642 inst.reloc.pc_rel = 1;
10643 }
10644 else
10645 {
10646 /* Generate a 16-bit opcode. */
10647 inst.instruction = THUMB_OP16 (inst.instruction);
10648 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10649 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10650 inst.reloc.pc_rel = 1;
10651
10652 inst.instruction |= Rd << 4;
10653 }
10654 }
10655
10656 /* Arithmetic instructions for which there is just one 16-bit
10657 instruction encoding, and it allows only two low registers.
10658 For maximal compatibility with ARM syntax, we allow three register
10659 operands even when Thumb-32 instructions are not available, as long
10660 as the first two are identical. For instance, both "sbc r0,r1" and
10661 "sbc r0,r0,r1" are allowed. */
10662 static void
10663 do_t_arit3 (void)
10664 {
10665 int Rd, Rs, Rn;
10666
10667 Rd = inst.operands[0].reg;
10668 Rs = (inst.operands[1].present
10669 ? inst.operands[1].reg /* Rd, Rs, foo */
10670 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10671 Rn = inst.operands[2].reg;
10672
10673 reject_bad_reg (Rd);
10674 reject_bad_reg (Rs);
10675 if (inst.operands[2].isreg)
10676 reject_bad_reg (Rn);
10677
10678 if (unified_syntax)
10679 {
10680 if (!inst.operands[2].isreg)
10681 {
10682 /* For an immediate, we always generate a 32-bit opcode;
10683 section relaxation will shrink it later if possible. */
10684 inst.instruction = THUMB_OP32 (inst.instruction);
10685 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10686 inst.instruction |= Rd << 8;
10687 inst.instruction |= Rs << 16;
10688 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10689 }
10690 else
10691 {
10692 bfd_boolean narrow;
10693
10694 /* See if we can do this with a 16-bit instruction. */
10695 if (THUMB_SETS_FLAGS (inst.instruction))
10696 narrow = !in_it_block ();
10697 else
10698 narrow = in_it_block ();
10699
10700 if (Rd > 7 || Rn > 7 || Rs > 7)
10701 narrow = FALSE;
10702 if (inst.operands[2].shifted)
10703 narrow = FALSE;
10704 if (inst.size_req == 4)
10705 narrow = FALSE;
10706
10707 if (narrow
10708 && Rd == Rs)
10709 {
10710 inst.instruction = THUMB_OP16 (inst.instruction);
10711 inst.instruction |= Rd;
10712 inst.instruction |= Rn << 3;
10713 return;
10714 }
10715
10716 /* If we get here, it can't be done in 16 bits. */
10717 constraint (inst.operands[2].shifted
10718 && inst.operands[2].immisreg,
10719 _("shift must be constant"));
10720 inst.instruction = THUMB_OP32 (inst.instruction);
10721 inst.instruction |= Rd << 8;
10722 inst.instruction |= Rs << 16;
10723 encode_thumb32_shifted_operand (2);
10724 }
10725 }
10726 else
10727 {
10728 /* On its face this is a lie - the instruction does set the
10729 flags. However, the only supported mnemonic in this mode
10730 says it doesn't. */
10731 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10732
10733 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10734 _("unshifted register required"));
10735 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10736 constraint (Rd != Rs,
10737 _("dest and source1 must be the same register"));
10738
10739 inst.instruction = THUMB_OP16 (inst.instruction);
10740 inst.instruction |= Rd;
10741 inst.instruction |= Rn << 3;
10742 }
10743 }
10744
10745 /* Similarly, but for instructions where the arithmetic operation is
10746 commutative, so we can allow either of them to be different from
10747 the destination operand in a 16-bit instruction. For instance, all
10748 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10749 accepted. */
10750 static void
10751 do_t_arit3c (void)
10752 {
10753 int Rd, Rs, Rn;
10754
10755 Rd = inst.operands[0].reg;
10756 Rs = (inst.operands[1].present
10757 ? inst.operands[1].reg /* Rd, Rs, foo */
10758 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10759 Rn = inst.operands[2].reg;
10760
10761 reject_bad_reg (Rd);
10762 reject_bad_reg (Rs);
10763 if (inst.operands[2].isreg)
10764 reject_bad_reg (Rn);
10765
10766 if (unified_syntax)
10767 {
10768 if (!inst.operands[2].isreg)
10769 {
10770 /* For an immediate, we always generate a 32-bit opcode;
10771 section relaxation will shrink it later if possible. */
10772 inst.instruction = THUMB_OP32 (inst.instruction);
10773 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10774 inst.instruction |= Rd << 8;
10775 inst.instruction |= Rs << 16;
10776 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10777 }
10778 else
10779 {
10780 bfd_boolean narrow;
10781
10782 /* See if we can do this with a 16-bit instruction. */
10783 if (THUMB_SETS_FLAGS (inst.instruction))
10784 narrow = !in_it_block ();
10785 else
10786 narrow = in_it_block ();
10787
10788 if (Rd > 7 || Rn > 7 || Rs > 7)
10789 narrow = FALSE;
10790 if (inst.operands[2].shifted)
10791 narrow = FALSE;
10792 if (inst.size_req == 4)
10793 narrow = FALSE;
10794
10795 if (narrow)
10796 {
10797 if (Rd == Rs)
10798 {
10799 inst.instruction = THUMB_OP16 (inst.instruction);
10800 inst.instruction |= Rd;
10801 inst.instruction |= Rn << 3;
10802 return;
10803 }
10804 if (Rd == Rn)
10805 {
10806 inst.instruction = THUMB_OP16 (inst.instruction);
10807 inst.instruction |= Rd;
10808 inst.instruction |= Rs << 3;
10809 return;
10810 }
10811 }
10812
10813 /* If we get here, it can't be done in 16 bits. */
10814 constraint (inst.operands[2].shifted
10815 && inst.operands[2].immisreg,
10816 _("shift must be constant"));
10817 inst.instruction = THUMB_OP32 (inst.instruction);
10818 inst.instruction |= Rd << 8;
10819 inst.instruction |= Rs << 16;
10820 encode_thumb32_shifted_operand (2);
10821 }
10822 }
10823 else
10824 {
10825 /* On its face this is a lie - the instruction does set the
10826 flags. However, the only supported mnemonic in this mode
10827 says it doesn't. */
10828 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10829
10830 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10831 _("unshifted register required"));
10832 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10833
10834 inst.instruction = THUMB_OP16 (inst.instruction);
10835 inst.instruction |= Rd;
10836
10837 if (Rd == Rs)
10838 inst.instruction |= Rn << 3;
10839 else if (Rd == Rn)
10840 inst.instruction |= Rs << 3;
10841 else
10842 constraint (1, _("dest must overlap one source register"));
10843 }
10844 }
10845
10846 static void
10847 do_t_bfc (void)
10848 {
10849 unsigned Rd;
10850 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10851 constraint (msb > 32, _("bit-field extends past end of register"));
10852 /* The instruction encoding stores the LSB and MSB,
10853 not the LSB and width. */
10854 Rd = inst.operands[0].reg;
10855 reject_bad_reg (Rd);
10856 inst.instruction |= Rd << 8;
10857 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10858 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10859 inst.instruction |= msb - 1;
10860 }
10861
10862 static void
10863 do_t_bfi (void)
10864 {
10865 int Rd, Rn;
10866 unsigned int msb;
10867
10868 Rd = inst.operands[0].reg;
10869 reject_bad_reg (Rd);
10870
10871 /* #0 in second position is alternative syntax for bfc, which is
10872 the same instruction but with REG_PC in the Rm field. */
10873 if (!inst.operands[1].isreg)
10874 Rn = REG_PC;
10875 else
10876 {
10877 Rn = inst.operands[1].reg;
10878 reject_bad_reg (Rn);
10879 }
10880
10881 msb = inst.operands[2].imm + inst.operands[3].imm;
10882 constraint (msb > 32, _("bit-field extends past end of register"));
10883 /* The instruction encoding stores the LSB and MSB,
10884 not the LSB and width. */
10885 inst.instruction |= Rd << 8;
10886 inst.instruction |= Rn << 16;
10887 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10888 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10889 inst.instruction |= msb - 1;
10890 }
10891
10892 static void
10893 do_t_bfx (void)
10894 {
10895 unsigned Rd, Rn;
10896
10897 Rd = inst.operands[0].reg;
10898 Rn = inst.operands[1].reg;
10899
10900 reject_bad_reg (Rd);
10901 reject_bad_reg (Rn);
10902
10903 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10904 _("bit-field extends past end of register"));
10905 inst.instruction |= Rd << 8;
10906 inst.instruction |= Rn << 16;
10907 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10908 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10909 inst.instruction |= inst.operands[3].imm - 1;
10910 }
10911
10912 /* ARM V5 Thumb BLX (argument parse)
10913 BLX <target_addr> which is BLX(1)
10914 BLX <Rm> which is BLX(2)
10915 Unfortunately, there are two different opcodes for this mnemonic.
10916 So, the insns[].value is not used, and the code here zaps values
10917 into inst.instruction.
10918
10919 ??? How to take advantage of the additional two bits of displacement
10920 available in Thumb32 mode? Need new relocation? */
10921
10922 static void
10923 do_t_blx (void)
10924 {
10925 set_it_insn_type_last ();
10926
10927 if (inst.operands[0].isreg)
10928 {
10929 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10930 /* We have a register, so this is BLX(2). */
10931 inst.instruction |= inst.operands[0].reg << 3;
10932 }
10933 else
10934 {
10935 /* No register. This must be BLX(1). */
10936 inst.instruction = 0xf000e800;
10937 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10938 }
10939 }
10940
10941 static void
10942 do_t_branch (void)
10943 {
10944 int opcode;
10945 int cond;
10946 int reloc;
10947
10948 cond = inst.cond;
10949 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10950
10951 if (in_it_block ())
10952 {
10953 /* Conditional branches inside IT blocks are encoded as unconditional
10954 branches. */
10955 cond = COND_ALWAYS;
10956 }
10957 else
10958 cond = inst.cond;
10959
10960 if (cond != COND_ALWAYS)
10961 opcode = T_MNEM_bcond;
10962 else
10963 opcode = inst.instruction;
10964
10965 if (unified_syntax
10966 && (inst.size_req == 4
10967 || (inst.size_req != 2
10968 && (inst.operands[0].hasreloc
10969 || inst.reloc.exp.X_op == O_constant))))
10970 {
10971 inst.instruction = THUMB_OP32(opcode);
10972 if (cond == COND_ALWAYS)
10973 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10974 else
10975 {
10976 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
10977 _("selected architecture does not support "
10978 "wide conditional branch instruction"));
10979
10980 gas_assert (cond != 0xF);
10981 inst.instruction |= cond << 22;
10982 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10983 }
10984 }
10985 else
10986 {
10987 inst.instruction = THUMB_OP16(opcode);
10988 if (cond == COND_ALWAYS)
10989 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10990 else
10991 {
10992 inst.instruction |= cond << 8;
10993 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10994 }
10995 /* Allow section relaxation. */
10996 if (unified_syntax && inst.size_req != 2)
10997 inst.relax = opcode;
10998 }
10999 inst.reloc.type = reloc;
11000 inst.reloc.pc_rel = 1;
11001 }
11002
11003 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11004 between the two is the maximum immediate allowed - which is passed in
11005 RANGE. */
static void
do_t_bkpt_hlt1 (int range)
{
  /* Neither BKPT nor HLT accepts a condition code.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  /* The immediate operand is optional; when absent it defaults to 0.  */
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
11020
static void
do_t_hlt (void)
{
  /* HLT takes a 6-bit immediate: 0..63.  */
  do_t_bkpt_hlt1 (0x3f);
}
11026
static void
do_t_bkpt (void)
{
  /* BKPT takes an 8-bit immediate: 0..255.  */
  do_t_bkpt_hlt1 (0xff);
}
11032
static void
do_t_branch23 (void)
{
  /* Thumb BL/BLX with a 23-bit (BRANCH23) displacement.  */
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11060
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  /* Rm occupies bits 3-6 of the 16-bit BX encoding.  */
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11070
11071 static void
11072 do_t_bxj (void)
11073 {
11074 int Rm;
11075
11076 set_it_insn_type_last ();
11077 Rm = inst.operands[0].reg;
11078 reject_bad_reg (Rm);
11079 inst.instruction |= Rm << 16;
11080 }
11081
11082 static void
11083 do_t_clz (void)
11084 {
11085 unsigned Rd;
11086 unsigned Rm;
11087
11088 Rd = inst.operands[0].reg;
11089 Rm = inst.operands[1].reg;
11090
11091 reject_bad_reg (Rd);
11092 reject_bad_reg (Rm);
11093
11094 inst.instruction |= Rd << 8;
11095 inst.instruction |= Rm << 16;
11096 inst.instruction |= Rm;
11097 }
11098
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11105
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Wide (32-bit) CPS.  Carry bits 4-5 of the 16-bit template
	 across into the wide encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* Narrow (16-bit) CPS.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11133
11134 /* THUMB CPY instruction (argument parse). */
11135
11136 static void
11137 do_t_cpy (void)
11138 {
11139 if (inst.size_req == 4)
11140 {
11141 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11142 inst.instruction |= inst.operands[0].reg << 8;
11143 inst.instruction |= inst.operands[1].reg;
11144 }
11145 else
11146 {
11147 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11148 inst.instruction |= (inst.operands[0].reg & 0x7);
11149 inst.instruction |= inst.operands[1].reg << 3;
11150 }
11151 }
11152
static void
do_t_cbz (void)
{
  /* CBZ/CBNZ may not appear inside an IT block and only accept a low
     register.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
11162
static void
do_t_dbg (void)
{
  /* The DBG option immediate goes in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
11168
11169 static void
11170 do_t_div (void)
11171 {
11172 unsigned Rd, Rn, Rm;
11173
11174 Rd = inst.operands[0].reg;
11175 Rn = (inst.operands[1].present
11176 ? inst.operands[1].reg : Rd);
11177 Rm = inst.operands[2].reg;
11178
11179 reject_bad_reg (Rd);
11180 reject_bad_reg (Rn);
11181 reject_bad_reg (Rm);
11182
11183 inst.instruction |= Rd << 8;
11184 inst.instruction |= Rn << 16;
11185 inst.instruction |= Rm;
11186 }
11187
11188 static void
11189 do_t_hint (void)
11190 {
11191 if (unified_syntax && inst.size_req == 4)
11192 inst.instruction = THUMB_OP32 (inst.instruction);
11193 else
11194 inst.instruction = THUMB_OP16 (inst.instruction);
11195 }
11196
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set mask bit determines the IT
	 block length; the bits above it are flipped so the encoded
	 then/else pattern stays correct for the inverted condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* The base condition occupies bits 4-7.  */
  inst.instruction |= cond << 4;
}
11239
11240 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 opcode distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  /* Loading the PC ends any enclosing IT block.  */
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* mask with exactly one bit set means a single-register transfer.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single register number into bits 12-15.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11303
/* Encode a Thumb LDM/STM (load/store multiple).  Operand 0 is the base
   register, operand 1 the register list held as a bit mask in
   inst.operands[1].imm.  Chooses a 16-bit encoding when the unified
   syntax permits it, otherwise falls back to the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;	/* TRUE once a 16-bit encoding was emitted.  */
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit corresponding to the base register in the list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia needs writeback; 16-bit ldmia needs
		 writeback exactly when the base register is not in
		 the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based ldmia/stmia becomes push/pop, or an SP-relative
		 str/ldr for a single-register list without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* No 16-bit form applied: use the 32-bit encoding.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Divided (pre-unified) syntax: only 16-bit ldmia/stmia with low
	 registers are accepted.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11431
11432 static void
11433 do_t_ldrex (void)
11434 {
11435 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11436 || inst.operands[1].postind || inst.operands[1].writeback
11437 || inst.operands[1].immisreg || inst.operands[1].shifted
11438 || inst.operands[1].negative,
11439 BAD_ADDR_MODE);
11440
11441 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11442
11443 inst.instruction |= inst.operands[0].reg << 12;
11444 inst.instruction |= inst.operands[1].reg << 16;
11445 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11446 }
11447
11448 static void
11449 do_t_ldrexd (void)
11450 {
11451 if (!inst.operands[1].present)
11452 {
11453 constraint (inst.operands[0].reg == REG_LR,
11454 _("r14 not allowed as first register "
11455 "when second register is omitted"));
11456 inst.operands[1].reg = inst.operands[0].reg + 1;
11457 }
11458 constraint (inst.operands[0].reg == inst.operands[1].reg,
11459 BAD_OVERLAP);
11460
11461 inst.instruction |= inst.operands[0].reg << 12;
11462 inst.instruction |= inst.operands[1].reg << 8;
11463 inst.instruction |= inst.operands[2].reg << 16;
11464 }
11465
/* Encode a Thumb single data transfer (LDR/STR and byte/halfword/signed
   variants).  Operand 0 is the transfer register, operand 1 the address
   (or a literal when not a register).  Prefers a 16-bit encoding or
   relaxable form where the operands allow it.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal/immediate form: try the literal pool first.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative: switch to the dedicated
		     16-bit opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: record the opcode for relaxation.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-unified) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms: word transfers only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Rewrite immediate-offset opcodes into their register-offset
     counterparts before encoding [Rn, Rm].  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11652
11653 static void
11654 do_t_ldstd (void)
11655 {
11656 if (!inst.operands[1].present)
11657 {
11658 inst.operands[1].reg = inst.operands[0].reg + 1;
11659 constraint (inst.operands[0].reg == REG_LR,
11660 _("r14 not allowed here"));
11661 constraint (inst.operands[0].reg == REG_R12,
11662 _("r12 not allowed here"));
11663 }
11664
11665 if (inst.operands[2].writeback
11666 && (inst.operands[0].reg == inst.operands[2].reg
11667 || inst.operands[1].reg == inst.operands[2].reg))
11668 as_warn (_("base register written back, and overlaps "
11669 "one of transfer registers"));
11670
11671 inst.instruction |= inst.operands[0].reg << 12;
11672 inst.instruction |= inst.operands[1].reg << 8;
11673 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11674 }
11675
11676 static void
11677 do_t_ldstt (void)
11678 {
11679 inst.instruction |= inst.operands[0].reg << 12;
11680 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11681 }
11682
11683 static void
11684 do_t_mla (void)
11685 {
11686 unsigned Rd, Rn, Rm, Ra;
11687
11688 Rd = inst.operands[0].reg;
11689 Rn = inst.operands[1].reg;
11690 Rm = inst.operands[2].reg;
11691 Ra = inst.operands[3].reg;
11692
11693 reject_bad_reg (Rd);
11694 reject_bad_reg (Rn);
11695 reject_bad_reg (Rm);
11696 reject_bad_reg (Ra);
11697
11698 inst.instruction |= Rd << 8;
11699 inst.instruction |= Rn << 16;
11700 inst.instruction |= Rm;
11701 inst.instruction |= Ra << 12;
11702 }
11703
11704 static void
11705 do_t_mlal (void)
11706 {
11707 unsigned RdLo, RdHi, Rn, Rm;
11708
11709 RdLo = inst.operands[0].reg;
11710 RdHi = inst.operands[1].reg;
11711 Rn = inst.operands[2].reg;
11712 Rm = inst.operands[3].reg;
11713
11714 reject_bad_reg (RdLo);
11715 reject_bad_reg (RdHi);
11716 reject_bad_reg (Rn);
11717 reject_bad_reg (Rm);
11718
11719 inst.instruction |= RdLo << 12;
11720 inst.instruction |= RdHi << 8;
11721 inst.instruction |= Rn << 16;
11722 inst.instruction |= Rm;
11723 }
11724
/* Encode Thumb MOV, MOVS and CMP, in both register and immediate
   forms, selecting between 16-bit and 32-bit encodings and emitting
   deprecation warnings where required.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field offset: bit 8 for the MOV family,
	 bit 16 for CMP.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		{
		  /* Keep any group relocation chosen by the parser.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		}
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-unified) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12017
12018 static void
12019 do_t_mov16 (void)
12020 {
12021 unsigned Rd;
12022 bfd_vma imm;
12023 bfd_boolean top;
12024
12025 top = (inst.instruction & 0x00800000) != 0;
12026 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12027 {
12028 constraint (top, _(":lower16: not allowed this instruction"));
12029 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12030 }
12031 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12032 {
12033 constraint (!top, _(":upper16: not allowed this instruction"));
12034 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12035 }
12036
12037 Rd = inst.operands[0].reg;
12038 reject_bad_reg (Rd);
12039
12040 inst.instruction |= Rd << 8;
12041 if (inst.reloc.type == BFD_RELOC_UNUSED)
12042 {
12043 imm = inst.reloc.exp.X_add_number;
12044 inst.instruction |= (imm & 0xf000) << 4;
12045 inst.instruction |= (imm & 0x0800) << 15;
12046 inst.instruction |= (imm & 0x0700) << 4;
12047 inst.instruction |= (imm & 0x00ff);
12048 }
12049 }
12050
/* Encode Thumb MVN/MVNS/TST/CMN-style operations (one destination or
   source register plus a register or immediate operand), choosing a
   16-bit encoding where possible.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Register field offset: bit 8 for the MVN family, bit 16
	 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit encodings with low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12130
12131 static void
12132 do_t_mrs (void)
12133 {
12134 unsigned Rd;
12135
12136 if (do_vfp_nsyn_mrs () == SUCCESS)
12137 return;
12138
12139 Rd = inst.operands[0].reg;
12140 reject_bad_reg (Rd);
12141 inst.instruction |= Rd << 8;
12142
12143 if (inst.operands[1].isreg)
12144 {
12145 unsigned br = inst.operands[1].reg;
12146 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
12147 as_bad (_("bad register for mrs"));
12148
12149 inst.instruction |= br & (0xf << 16);
12150 inst.instruction |= (br & 0x300) >> 4;
12151 inst.instruction |= (br & SPSR_BIT) >> 2;
12152 }
12153 else
12154 {
12155 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12156
12157 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12158 {
12159 /* PR gas/12698: The constraint is only applied for m_profile.
12160 If the user has specified -march=all, we want to ignore it as
12161 we are building for any CPU type, including non-m variants. */
12162 bfd_boolean m_profile =
12163 !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12164 constraint ((flags != 0) && m_profile, _("selected processor does "
12165 "not support requested special purpose register"));
12166 }
12167 else
12168 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12169 devices). */
12170 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
12171 _("'APSR', 'CPSR' or 'SPSR' expected"));
12172
12173 inst.instruction |= (flags & SPSR_BIT) >> 2;
12174 inst.instruction |= inst.operands[1].imm & 0xff;
12175 inst.instruction |= 0xf0000;
12176 }
12177 }
12178
12179 static void
12180 do_t_msr (void)
12181 {
12182 int flags;
12183 unsigned Rn;
12184
12185 if (do_vfp_nsyn_msr () == SUCCESS)
12186 return;
12187
12188 constraint (!inst.operands[1].isreg,
12189 _("Thumb encoding does not support an immediate here"));
12190
12191 if (inst.operands[0].isreg)
12192 flags = (int)(inst.operands[0].reg);
12193 else
12194 flags = inst.operands[0].imm;
12195
12196 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12197 {
12198 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12199
12200 /* PR gas/12698: The constraint is only applied for m_profile.
12201 If the user has specified -march=all, we want to ignore it as
12202 we are building for any CPU type, including non-m variants. */
12203 bfd_boolean m_profile =
12204 !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12205 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12206 && (bits & ~(PSR_s | PSR_f)) != 0)
12207 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12208 && bits != PSR_f)) && m_profile,
12209 _("selected processor does not support requested special "
12210 "purpose register"));
12211 }
12212 else
12213 constraint ((flags & 0xff) != 0, _("selected processor does not support "
12214 "requested special purpose register"));
12215
12216 Rn = inst.operands[1].reg;
12217 reject_bad_reg (Rn);
12218
12219 inst.instruction |= (flags & SPSR_BIT) >> 2;
12220 inst.instruction |= (flags & 0xf0000) >> 8;
12221 inst.instruction |= (flags & 0x300) >> 4;
12222 inst.instruction |= (flags & 0xff);
12223 inst.instruction |= Rn << 16;
12224 }
12225
12226 static void
12227 do_t_mul (void)
12228 {
12229 bfd_boolean narrow;
12230 unsigned Rd, Rn, Rm;
12231
12232 if (!inst.operands[2].present)
12233 inst.operands[2].reg = inst.operands[0].reg;
12234
12235 Rd = inst.operands[0].reg;
12236 Rn = inst.operands[1].reg;
12237 Rm = inst.operands[2].reg;
12238
12239 if (unified_syntax)
12240 {
12241 if (inst.size_req == 4
12242 || (Rd != Rn
12243 && Rd != Rm)
12244 || Rn > 7
12245 || Rm > 7)
12246 narrow = FALSE;
12247 else if (inst.instruction == T_MNEM_muls)
12248 narrow = !in_it_block ();
12249 else
12250 narrow = in_it_block ();
12251 }
12252 else
12253 {
12254 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12255 constraint (Rn > 7 || Rm > 7,
12256 BAD_HIREG);
12257 narrow = TRUE;
12258 }
12259
12260 if (narrow)
12261 {
12262 /* 16-bit MULS/Conditional MUL. */
12263 inst.instruction = THUMB_OP16 (inst.instruction);
12264 inst.instruction |= Rd;
12265
12266 if (Rd == Rn)
12267 inst.instruction |= Rm << 3;
12268 else if (Rd == Rm)
12269 inst.instruction |= Rn << 3;
12270 else
12271 constraint (1, _("dest must overlap one source register"));
12272 }
12273 else
12274 {
12275 constraint (inst.instruction != T_MNEM_mul,
12276 _("Thumb-2 MUL must not set flags"));
12277 /* 32-bit MUL. */
12278 inst.instruction = THUMB_OP32 (inst.instruction);
12279 inst.instruction |= Rd << 8;
12280 inst.instruction |= Rn << 16;
12281 inst.instruction |= Rm << 0;
12282
12283 reject_bad_reg (Rd);
12284 reject_bad_reg (Rn);
12285 reject_bad_reg (Rm);
12286 }
12287 }
12288
12289 static void
12290 do_t_mull (void)
12291 {
12292 unsigned RdLo, RdHi, Rn, Rm;
12293
12294 RdLo = inst.operands[0].reg;
12295 RdHi = inst.operands[1].reg;
12296 Rn = inst.operands[2].reg;
12297 Rm = inst.operands[3].reg;
12298
12299 reject_bad_reg (RdLo);
12300 reject_bad_reg (RdHi);
12301 reject_bad_reg (Rn);
12302 reject_bad_reg (Rm);
12303
12304 inst.instruction |= RdLo << 12;
12305 inst.instruction |= RdHi << 8;
12306 inst.instruction |= Rn << 16;
12307 inst.instruction |= Rm;
12308
12309 if (RdLo == RdHi)
12310 as_tsktsk (_("rdhi and rdlo must be different"));
12311 }
12312
12313 static void
12314 do_t_nop (void)
12315 {
12316 set_it_insn_type (NEUTRAL_IT_INSN);
12317
12318 if (unified_syntax)
12319 {
12320 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12321 {
12322 inst.instruction = THUMB_OP32 (inst.instruction);
12323 inst.instruction |= inst.operands[0].imm;
12324 }
12325 else
12326 {
12327 /* PR9722: Check for Thumb2 availability before
12328 generating a thumb2 nop instruction. */
12329 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12330 {
12331 inst.instruction = THUMB_OP16 (inst.instruction);
12332 inst.instruction |= inst.operands[0].imm << 4;
12333 }
12334 else
12335 inst.instruction = 0x46c0;
12336 }
12337 }
12338 else
12339 {
12340 constraint (inst.operands[0].present,
12341 _("Thumb does not support NOP with hints"));
12342 inst.instruction = 0x46c0;
12343 }
12344 }
12345
12346 static void
12347 do_t_neg (void)
12348 {
12349 if (unified_syntax)
12350 {
12351 bfd_boolean narrow;
12352
12353 if (THUMB_SETS_FLAGS (inst.instruction))
12354 narrow = !in_it_block ();
12355 else
12356 narrow = in_it_block ();
12357 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12358 narrow = FALSE;
12359 if (inst.size_req == 4)
12360 narrow = FALSE;
12361
12362 if (!narrow)
12363 {
12364 inst.instruction = THUMB_OP32 (inst.instruction);
12365 inst.instruction |= inst.operands[0].reg << 8;
12366 inst.instruction |= inst.operands[1].reg << 16;
12367 }
12368 else
12369 {
12370 inst.instruction = THUMB_OP16 (inst.instruction);
12371 inst.instruction |= inst.operands[0].reg;
12372 inst.instruction |= inst.operands[1].reg << 3;
12373 }
12374 }
12375 else
12376 {
12377 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12378 BAD_HIREG);
12379 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12380
12381 inst.instruction = THUMB_OP16 (inst.instruction);
12382 inst.instruction |= inst.operands[0].reg;
12383 inst.instruction |= inst.operands[1].reg << 3;
12384 }
12385 }
12386
12387 static void
12388 do_t_orn (void)
12389 {
12390 unsigned Rd, Rn;
12391
12392 Rd = inst.operands[0].reg;
12393 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12394
12395 reject_bad_reg (Rd);
12396 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12397 reject_bad_reg (Rn);
12398
12399 inst.instruction |= Rd << 8;
12400 inst.instruction |= Rn << 16;
12401
12402 if (!inst.operands[2].isreg)
12403 {
12404 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12405 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12406 }
12407 else
12408 {
12409 unsigned Rm;
12410
12411 Rm = inst.operands[2].reg;
12412 reject_bad_reg (Rm);
12413
12414 constraint (inst.operands[2].shifted
12415 && inst.operands[2].immisreg,
12416 _("shift must be constant"));
12417 encode_thumb32_shifted_operand (2);
12418 }
12419 }
12420
12421 static void
12422 do_t_pkhbt (void)
12423 {
12424 unsigned Rd, Rn, Rm;
12425
12426 Rd = inst.operands[0].reg;
12427 Rn = inst.operands[1].reg;
12428 Rm = inst.operands[2].reg;
12429
12430 reject_bad_reg (Rd);
12431 reject_bad_reg (Rn);
12432 reject_bad_reg (Rm);
12433
12434 inst.instruction |= Rd << 8;
12435 inst.instruction |= Rn << 16;
12436 inst.instruction |= Rm;
12437 if (inst.operands[3].present)
12438 {
12439 unsigned int val = inst.reloc.exp.X_add_number;
12440 constraint (inst.reloc.exp.X_op != O_constant,
12441 _("expression too complex"));
12442 inst.instruction |= (val & 0x1c) << 10;
12443 inst.instruction |= (val & 0x03) << 6;
12444 }
12445 }
12446
12447 static void
12448 do_t_pkhtb (void)
12449 {
12450 if (!inst.operands[3].present)
12451 {
12452 unsigned Rtmp;
12453
12454 inst.instruction &= ~0x00000020;
12455
12456 /* PR 10168. Swap the Rm and Rn registers. */
12457 Rtmp = inst.operands[1].reg;
12458 inst.operands[1].reg = inst.operands[2].reg;
12459 inst.operands[2].reg = Rtmp;
12460 }
12461 do_t_pkhbt ();
12462 }
12463
/* Encode a Thumb PLD (preload) addressing operand.  */
static void
do_t_pld (void)
{
  /* A register offset must not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12472
/* Encode Thumb PUSH and POP.  The register mask is in
   operands[0].imm.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* 16-bit form: low registers only...  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   /* ...optionally plus LR (for push) or PC (for pop).  */
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* 32-bit form: an LDM/STM on SP (register 13).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12505
12506 static void
12507 do_t_rbit (void)
12508 {
12509 unsigned Rd, Rm;
12510
12511 Rd = inst.operands[0].reg;
12512 Rm = inst.operands[1].reg;
12513
12514 reject_bad_reg (Rd);
12515 reject_bad_reg (Rm);
12516
12517 inst.instruction |= Rd << 8;
12518 inst.instruction |= Rm << 16;
12519 inst.instruction |= Rm;
12520 }
12521
12522 static void
12523 do_t_rev (void)
12524 {
12525 unsigned Rd, Rm;
12526
12527 Rd = inst.operands[0].reg;
12528 Rm = inst.operands[1].reg;
12529
12530 reject_bad_reg (Rd);
12531 reject_bad_reg (Rm);
12532
12533 if (Rd <= 7 && Rm <= 7
12534 && inst.size_req != 4)
12535 {
12536 inst.instruction = THUMB_OP16 (inst.instruction);
12537 inst.instruction |= Rd;
12538 inst.instruction |= Rm << 3;
12539 }
12540 else if (unified_syntax)
12541 {
12542 inst.instruction = THUMB_OP32 (inst.instruction);
12543 inst.instruction |= Rd << 8;
12544 inst.instruction |= Rm << 16;
12545 inst.instruction |= Rm;
12546 }
12547 else
12548 inst.error = BAD_HIREG;
12549 }
12550
12551 static void
12552 do_t_rrx (void)
12553 {
12554 unsigned Rd, Rm;
12555
12556 Rd = inst.operands[0].reg;
12557 Rm = inst.operands[1].reg;
12558
12559 reject_bad_reg (Rd);
12560 reject_bad_reg (Rm);
12561
12562 inst.instruction |= Rd << 8;
12563 inst.instruction |= Rm;
12564 }
12565
/* Encode Thumb RSB (reverse subtract).  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* The narrow form only exists for a zero immediate.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12620
/* Encode Thumb SETEND.  A nonzero operands[0].imm sets the E bit
   (bit 3).  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* SETEND is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12632
/* Encode the Thumb shift instructions (asr, lsl, lsr, ror and their
   flag-setting variants).  A missing first source operand defaults to
   the destination register.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the narrow (16-bit) encoding is usable.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no narrow ror-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Shift by immediate: encode as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: only low registers and non-flag-setting
	 mnemonics are accepted.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12780
12781 static void
12782 do_t_simd (void)
12783 {
12784 unsigned Rd, Rn, Rm;
12785
12786 Rd = inst.operands[0].reg;
12787 Rn = inst.operands[1].reg;
12788 Rm = inst.operands[2].reg;
12789
12790 reject_bad_reg (Rd);
12791 reject_bad_reg (Rn);
12792 reject_bad_reg (Rm);
12793
12794 inst.instruction |= Rd << 8;
12795 inst.instruction |= Rn << 16;
12796 inst.instruction |= Rm;
12797 }
12798
12799 static void
12800 do_t_simd2 (void)
12801 {
12802 unsigned Rd, Rn, Rm;
12803
12804 Rd = inst.operands[0].reg;
12805 Rm = inst.operands[1].reg;
12806 Rn = inst.operands[2].reg;
12807
12808 reject_bad_reg (Rd);
12809 reject_bad_reg (Rn);
12810 reject_bad_reg (Rm);
12811
12812 inst.instruction |= Rd << 8;
12813 inst.instruction |= Rn << 16;
12814 inst.instruction |= Rm;
12815 }
12816
/* Encode Thumb SMC (secure monitor call).  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  /* The 16-bit immediate is scattered across three bit fields.  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12832
12833 static void
12834 do_t_hvc (void)
12835 {
12836 unsigned int value = inst.reloc.exp.X_add_number;
12837
12838 inst.reloc.type = BFD_RELOC_UNUSED;
12839 inst.instruction |= (value & 0x0fff);
12840 inst.instruction |= (value & 0xf000) << 4;
12841 }
12842
/* Common worker for SSAT and USAT.  BIAS is subtracted from the
   saturate-position operand before encoding (1 for SSAT, 0 for
   USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift of the source operand.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the shift amount across the imm3:imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12880
/* Encode Thumb SSAT; the saturate position operand is 1-based.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12886
12887 static void
12888 do_t_ssat16 (void)
12889 {
12890 unsigned Rd, Rn;
12891
12892 Rd = inst.operands[0].reg;
12893 Rn = inst.operands[2].reg;
12894
12895 reject_bad_reg (Rd);
12896 reject_bad_reg (Rn);
12897
12898 inst.instruction |= Rd << 8;
12899 inst.instruction |= inst.operands[1].imm - 1;
12900 inst.instruction |= Rn << 16;
12901 }
12902
/* Encode Thumb-2 STREX.  */
static void
do_t_strex (void)
{
  /* Only a plain "[Rn, #imm]" addressing form is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12919
/* Encode Thumb-2 STREXD.  A missing Rt2 defaults to Rt + 1.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap the other operands.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12936
12937 static void
12938 do_t_sxtah (void)
12939 {
12940 unsigned Rd, Rn, Rm;
12941
12942 Rd = inst.operands[0].reg;
12943 Rn = inst.operands[1].reg;
12944 Rm = inst.operands[2].reg;
12945
12946 reject_bad_reg (Rd);
12947 reject_bad_reg (Rn);
12948 reject_bad_reg (Rm);
12949
12950 inst.instruction |= Rd << 8;
12951 inst.instruction |= Rn << 16;
12952 inst.instruction |= Rm;
12953 inst.instruction |= inst.operands[3].imm << 4;
12954 }
12955
/* Encode a Thumb extend instruction (sxth etc.), with an optional
   rotation in operands[2].  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* Narrow form requires a 16-bit opcode, low registers and no
     rotation.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12991
/* Encode Thumb SVC/SWI.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13008
/* Encode Thumb-2 TBB/TBH (table branch).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes the halfword variant.  */
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13030
/* Encode Thumb UDF (permanently undefined).  A missing immediate
   operand defaults to zero.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      /* 32-bit form: immediate split across the imm4:imm12 fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13053
13054
/* Encode Thumb USAT; the saturate position operand is encoded
   unchanged.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13060
13061 static void
13062 do_t_usat16 (void)
13063 {
13064 unsigned Rd, Rn;
13065
13066 Rd = inst.operands[0].reg;
13067 Rn = inst.operands[2].reg;
13068
13069 reject_bad_reg (Rd);
13070 reject_bad_reg (Rn);
13071
13072 inst.instruction |= Rd << 8;
13073 inst.instruction |= inst.operands[1].imm;
13074 inst.instruction |= Rn << 16;
13075 }
13076
13077 /* Neon instruction encoder helpers. */
13078
13079 /* Encodings for the different types for various Neon opcodes. */
13080
13081 /* An "invalid" code for the following tables. */
13082 #define N_INV -1u
13083
/* One row of NEON_ENC_TAB: the alternative encodings of an overloaded
   mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer-typed variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13090
13091 /* Map overloaded Neon opcodes to their respective encodings. */
13092 #define NEON_ENC_TAB \
13093 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13094 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13095 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13096 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13097 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13098 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13099 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13100 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13101 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13102 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13103 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13104 /* Register variants of the following two instructions are encoded as
13105 vcge / vcgt with the operands reversed. */ \
13106 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13107 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13108 X(vfma, N_INV, 0x0000c10, N_INV), \
13109 X(vfms, N_INV, 0x0200c10, N_INV), \
13110 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13111 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13112 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13113 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13114 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13115 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13116 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13117 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13118 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13119 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13120 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13121 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13122 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13123 X(vshl, 0x0000400, N_INV, 0x0800510), \
13124 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13125 X(vand, 0x0000110, N_INV, 0x0800030), \
13126 X(vbic, 0x0100110, N_INV, 0x0800030), \
13127 X(veor, 0x1000110, N_INV, N_INV), \
13128 X(vorn, 0x0300110, N_INV, 0x0800010), \
13129 X(vorr, 0x0200110, N_INV, 0x0800010), \
13130 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13131 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13132 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13133 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13134 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13135 X(vst1, 0x0000000, 0x0800000, N_INV), \
13136 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13137 X(vst2, 0x0000100, 0x0800100, N_INV), \
13138 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13139 X(vst3, 0x0000200, 0x0800200, N_INV), \
13140 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13141 X(vst4, 0x0000300, 0x0800300, N_INV), \
13142 X(vmovn, 0x1b20200, N_INV, N_INV), \
13143 X(vtrn, 0x1b20080, N_INV, N_INV), \
13144 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13145 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13146 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13147 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13148 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13149 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13150 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13151 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13152 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13153 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13154 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13155 X(vseleq, 0xe000a00, N_INV, N_INV), \
13156 X(vselvs, 0xe100a00, N_INV, N_INV), \
13157 X(vselge, 0xe200a00, N_INV, N_INV), \
13158 X(vselgt, 0xe300a00, N_INV, N_INV), \
13159 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13160 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13161 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13162 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13163 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13164 X(aes, 0x3b00300, N_INV, N_INV), \
13165 X(sha3op, 0x2000c00, N_INV, N_INV), \
13166 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13167 X(sha2op, 0x3ba0380, N_INV, N_INV)
13168
/* N_MNEM_<opc> enumerators, used to index neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
  NEON_ENC_TAB
#undef X
};
13175
/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
#undef X
};
13182
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE_/DOUBLE_ variants preserve the top nibble of X (e.g. a
   condition field) across the table lookup.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
/* NOTE(review): this mask is 0xf000000, one nibble narrower than the
   0xf0000000 used by SINGLE_/DOUBLE_ above — confirm intentional.  */
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding variant named by TYPE and
   mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* NB: expands to a statement that RETURNS from the enclosing function
   when a Neon type suffix was given on a non-Neon instruction.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
13218
13219 /* Define shapes for instruction operands. The following mnemonic characters
13220 are used in this table:
13221
   H - VFP S<n> register, used for half-precision operands
   F - VFP S<n> register
   D - Neon D<n> register
   Q - Neon Q<n> register
   I - Immediate
   S - Scalar
   R - ARM register
   L - D<n> register list
13229
13230 This table is used to generate various data:
13231 - enumerations of the form NS_DDR to be used as arguments to
13232 neon_select_shape.
13233 - a table classifying shapes into single, double, quad, mixed.
13234 - a table used to drive neon_select_shape. */
13235
13236 #define NEON_SHAPE_DEF \
13237 X(3, (D, D, D), DOUBLE), \
13238 X(3, (Q, Q, Q), QUAD), \
13239 X(3, (D, D, I), DOUBLE), \
13240 X(3, (Q, Q, I), QUAD), \
13241 X(3, (D, D, S), DOUBLE), \
13242 X(3, (Q, Q, S), QUAD), \
13243 X(2, (D, D), DOUBLE), \
13244 X(2, (Q, Q), QUAD), \
13245 X(2, (D, S), DOUBLE), \
13246 X(2, (Q, S), QUAD), \
13247 X(2, (D, R), DOUBLE), \
13248 X(2, (Q, R), QUAD), \
13249 X(2, (D, I), DOUBLE), \
13250 X(2, (Q, I), QUAD), \
13251 X(3, (D, L, D), DOUBLE), \
13252 X(2, (D, Q), MIXED), \
13253 X(2, (Q, D), MIXED), \
13254 X(3, (D, Q, I), MIXED), \
13255 X(3, (Q, D, I), MIXED), \
13256 X(3, (Q, D, D), MIXED), \
13257 X(3, (D, Q, Q), MIXED), \
13258 X(3, (Q, Q, D), MIXED), \
13259 X(3, (Q, D, S), MIXED), \
13260 X(3, (D, Q, S), MIXED), \
13261 X(4, (D, D, D, I), DOUBLE), \
13262 X(4, (Q, Q, Q, I), QUAD), \
13263 X(2, (F, F), SINGLE), \
13264 X(3, (F, F, F), SINGLE), \
13265 X(2, (F, I), SINGLE), \
13266 X(2, (F, D), MIXED), \
13267 X(2, (D, F), MIXED), \
13268 X(3, (F, F, I), MIXED), \
13269 X(4, (R, R, F, F), SINGLE), \
13270 X(4, (F, F, R, R), SINGLE), \
13271 X(3, (D, R, R), DOUBLE), \
13272 X(3, (R, R, D), DOUBLE), \
13273 X(2, (S, R), SINGLE), \
13274 X(2, (R, S), SINGLE), \
13275 X(2, (F, R), SINGLE), \
13276 X(2, (R, F), SINGLE), \
13277 /* Half float shape supported so far. */\
13278 X (2, (H, D), MIXED), \
13279 X (2, (D, H), MIXED), \
13280 X (2, (H, F), MIXED), \
13281 X (2, (F, H), MIXED), \
13282 X (2, (H, H), HALF), \
13283 X (2, (H, R), HALF), \
13284 X (2, (R, H), HALF), \
13285 X (2, (H, I), HALF), \
13286 X (3, (H, H, H), HALF), \
13287 X (3, (H, F, I), MIXED), \
13288 X (3, (F, H, I), MIXED)
13289
13290 #define S2(A,B) NS_##A##B
13291 #define S3(A,B,C) NS_##A##B##C
13292 #define S4(A,B,C,D) NS_##A##B##C##D
13293
13294 #define X(N, L, C) S##N L
13295
/* NS_<shape> enumerators generated from NEON_SHAPE_DEF, plus the
   NS_NULL sentinel meaning "no shape matched".  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13306
/* Coarse classification of a shape (single, double, quad precision
   registers, or a mixture).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};
13315
13316 #define X(N, L, C) SC_##C
13317
13318 static enum neon_shape_class neon_shape_class[] =
13319 {
13320 NEON_SHAPE_DEF
13321 };
13322
13323 #undef X
13324
/* One element (operand kind) of a shape; mirrors the mnemonic
   characters used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};
13336
/* Register widths of above, in bits; zero where no fixed width
   applies.  Read-only lookup table, hence const.  */
static const unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};
13349
/* A decoded shape: the number of operands and the kind of each.  */
struct neon_shape_info
{
  unsigned els;		/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};
13355
13356 #define S2(A,B) { SE_##A, SE_##B }
13357 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13358 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13359
13360 #define X(N, L, C) { N, S##N L }
13361
13362 static struct neon_shape_info neon_shape_tab[] =
13363 {
13364 NEON_SHAPE_DEF
13365 };
13366
13367 #undef X
13368 #undef S2
13369 #undef S3
13370 #undef S4
13371
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the low modifier bits deliberately reuse
   the values of the low type bits: they are only interpreted when N_EQK is
   set.  */

enum neon_type_mask
{
  N_S8 = 0x0000001,
  N_S16 = 0x0000002,
  N_S32 = 0x0000004,
  N_S64 = 0x0000008,
  N_U8 = 0x0000010,
  N_U16 = 0x0000020,
  N_U32 = 0x0000040,
  N_U64 = 0x0000080,
  N_I8 = 0x0000100,
  N_I16 = 0x0000200,
  N_I32 = 0x0000400,
  N_I64 = 0x0000800,
  N_8 = 0x0001000,
  N_16 = 0x0002000,
  N_32 = 0x0004000,
  N_64 = 0x0008000,
  N_P8 = 0x0010000,
  N_P16 = 0x0020000,
  N_F16 = 0x0040000,
  N_F32 = 0x0080000,
  N_F64 = 0x0100000,
  N_P64 = 0x0200000,
  N_KEY = 0x1000000, /* Key element (main type specifier).  */
  N_EQK = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13416
/* All of the N_EQK modifier bits.  */
#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common combinations of the type-mask bits.  */
#define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32 (N_SU_32 | N_F32)
#define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
#define N_F_ALL (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13430
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all operands.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A shape with more elements than we have operands cannot match.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half-precision: a single-precision VFP register with a
		 16-bit type attached (either globally or per-operand).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single-precision VFP register: 32-bit type, or untyped.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Double-precision / D register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon quadword register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate operand.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (element of a vector register).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: anything goes here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only diagnose if the caller actually supplied candidate shapes.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13573
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  /* neon_shape_class[] classifies every shape; SC_QUAD marks the Q forms.  */
  return neon_shape_class[shape] == SC_QUAD;
}
13582
13583 static void
13584 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13585 unsigned *g_size)
13586 {
13587 /* Allow modification to be made to types which are constrained to be
13588 based on the key element, based on bits set alongside N_EQK. */
13589 if ((typebits & N_EQK) != 0)
13590 {
13591 if ((typebits & N_HLF) != 0)
13592 *g_size /= 2;
13593 else if ((typebits & N_DBL) != 0)
13594 *g_size *= 2;
13595 if ((typebits & N_SGN) != 0)
13596 *g_type = NT_signed;
13597 else if ((typebits & N_UNS) != 0)
13598 *g_type = NT_unsigned;
13599 else if ((typebits & N_INT) != 0)
13600 *g_type = NT_integer;
13601 else if ((typebits & N_FLT) != 0)
13602 *g_type = NT_float;
13603 else if ((typebits & N_SIZ) != 0)
13604 *g_type = NT_untyped;
13605 }
13606 }
13607
13608 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13609 operand type, i.e. the single type specified in a Neon instruction when it
13610 is the only one given. */
13611
13612 static struct neon_type_el
13613 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13614 {
13615 struct neon_type_el dest = *key;
13616
13617 gas_assert ((thisarg & N_EQK) != 0);
13618
13619 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13620
13621 return dest;
13622 }
13623
13624 /* Convert Neon type and size into compact bitmask representation. */
13625
13626 static enum neon_type_mask
13627 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13628 {
13629 switch (type)
13630 {
13631 case NT_untyped:
13632 switch (size)
13633 {
13634 case 8: return N_8;
13635 case 16: return N_16;
13636 case 32: return N_32;
13637 case 64: return N_64;
13638 default: ;
13639 }
13640 break;
13641
13642 case NT_integer:
13643 switch (size)
13644 {
13645 case 8: return N_I8;
13646 case 16: return N_I16;
13647 case 32: return N_I32;
13648 case 64: return N_I64;
13649 default: ;
13650 }
13651 break;
13652
13653 case NT_float:
13654 switch (size)
13655 {
13656 case 16: return N_F16;
13657 case 32: return N_F32;
13658 case 64: return N_F64;
13659 default: ;
13660 }
13661 break;
13662
13663 case NT_poly:
13664 switch (size)
13665 {
13666 case 8: return N_P8;
13667 case 16: return N_P16;
13668 case 64: return N_P64;
13669 default: ;
13670 }
13671 break;
13672
13673 case NT_signed:
13674 switch (size)
13675 {
13676 case 8: return N_S8;
13677 case 16: return N_S16;
13678 case 32: return N_S32;
13679 case 64: return N_S64;
13680 default: ;
13681 }
13682 break;
13683
13684 case NT_unsigned:
13685 switch (size)
13686 {
13687 case 8: return N_U8;
13688 case 16: return N_U16;
13689 case 32: return N_U32;
13690 case 64: return N_U64;
13691 default: ;
13692 }
13693 break;
13694
13695 default: ;
13696 }
13697
13698 return N_UTYP;
13699 }
13700
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK is a modifier, not a concrete type: it cannot be decoded.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from the size-group the bit belongs to.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element type from the type-group the bit belongs to.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13739
13740 /* Modify a bitmask of allowed types. This is only needed for type
13741 relaxation. */
13742
13743 static unsigned
13744 modify_types_allowed (unsigned allowed, unsigned mods)
13745 {
13746 unsigned size;
13747 enum neon_el_type type;
13748 unsigned destmask;
13749 int i;
13750
13751 destmask = 0;
13752
13753 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13754 {
13755 if (el_type_of_type_chk (&type, &size,
13756 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13757 {
13758 neon_modify_type_size (mods, &type, &size);
13759 destmask |= type_chk_of_el_type (type, size);
13760 }
13761 }
13762
13763 return destmask;
13764 }
13765
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; NT_invtype tells the caller the check failed.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the N_KEY marker.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type after the mnemonic and a type after an operand are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* The single given type is the key type; derive the rest from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 only records the key type; pass 1 checks every
     operand against what the key (possibly modified) allows.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1 an N_EQK operand's allowed set is derived from the
	     key's allowed set with this operand's modifiers applied.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  /* Independent operand: its concrete type bit must be in
		     the allowed set.  */
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-derived operand: it must equal the key type after
		     applying this operand's modifiers.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13966
13967 /* Neon-style VFP instruction forwarding. */
13968
13969 /* Thumb VFP instructions have 0xE in the condition field. */
13970
13971 static void
13972 do_vfp_cond_or_thumb (void)
13973 {
13974 inst.is_neon = 1;
13975
13976 if (thumb_mode)
13977 inst.instruction |= 0xe0000000;
13978 else
13979 inst.instruction |= inst.cond << 28;
13980 }
13981
13982 /* Look up and encode a simple mnemonic, for use as a helper function for the
13983 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13984 etc. It is assumed that operand parsing has already been done, and that the
13985 operands are in the form expected by the given opcode (this isn't necessarily
13986 the same as the form in which they were parsed, hence some massaging must
13987 take place before this function is called).
13988 Checks current arch version against that in the looked-up opcode. */
13989
13990 static void
13991 do_vfp_nsyn_opcode (const char *opname)
13992 {
13993 const struct asm_opcode *opcode;
13994
13995 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13996
13997 if (!opcode)
13998 abort ();
13999
14000 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14001 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14002 _(BAD_FPU));
14003
14004 inst.is_neon = 1;
14005
14006 if (thumb_mode)
14007 {
14008 inst.instruction = opcode->tvalue;
14009 opcode->tencode ();
14010 }
14011 else
14012 {
14013 inst.instruction = (inst.cond << 28) | opcode->avalue;
14014 opcode->aencode ();
14015 }
14016 }
14017
14018 static void
14019 do_vfp_nsyn_add_sub (enum neon_shape rs)
14020 {
14021 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14022
14023 if (rs == NS_FFF || rs == NS_HHH)
14024 {
14025 if (is_add)
14026 do_vfp_nsyn_opcode ("fadds");
14027 else
14028 do_vfp_nsyn_opcode ("fsubs");
14029
14030 /* ARMv8.2 fp16 instruction. */
14031 if (rs == NS_HHH)
14032 do_scalar_fp16_v82_encode ();
14033 }
14034 else
14035 {
14036 if (is_add)
14037 do_vfp_nsyn_opcode ("faddd");
14038 else
14039 do_vfp_nsyn_opcode ("fsubd");
14040 }
14041 }
14042
14043 /* Check operand types to see if this is a VFP instruction, and if so call
14044 PFN (). */
14045
14046 static int
14047 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14048 {
14049 enum neon_shape rs;
14050 struct neon_type_el et;
14051
14052 switch (args)
14053 {
14054 case 2:
14055 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14056 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14057 break;
14058
14059 case 3:
14060 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14061 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14062 N_F_ALL | N_KEY | N_VFP);
14063 break;
14064
14065 default:
14066 abort ();
14067 }
14068
14069 if (et.type != NT_invtype)
14070 {
14071 pfn (rs);
14072 return SUCCESS;
14073 }
14074
14075 inst.error = NULL;
14076 return FAIL;
14077 }
14078
14079 static void
14080 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14081 {
14082 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14083
14084 if (rs == NS_FFF || rs == NS_HHH)
14085 {
14086 if (is_mla)
14087 do_vfp_nsyn_opcode ("fmacs");
14088 else
14089 do_vfp_nsyn_opcode ("fnmacs");
14090
14091 /* ARMv8.2 fp16 instruction. */
14092 if (rs == NS_HHH)
14093 do_scalar_fp16_v82_encode ();
14094 }
14095 else
14096 {
14097 if (is_mla)
14098 do_vfp_nsyn_opcode ("fmacd");
14099 else
14100 do_vfp_nsyn_opcode ("fnmacd");
14101 }
14102 }
14103
14104 static void
14105 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14106 {
14107 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14108
14109 if (rs == NS_FFF || rs == NS_HHH)
14110 {
14111 if (is_fma)
14112 do_vfp_nsyn_opcode ("ffmas");
14113 else
14114 do_vfp_nsyn_opcode ("ffnmas");
14115
14116 /* ARMv8.2 fp16 instruction. */
14117 if (rs == NS_HHH)
14118 do_scalar_fp16_v82_encode ();
14119 }
14120 else
14121 {
14122 if (is_fma)
14123 do_vfp_nsyn_opcode ("ffmad");
14124 else
14125 do_vfp_nsyn_opcode ("ffnmad");
14126 }
14127 }
14128
14129 static void
14130 do_vfp_nsyn_mul (enum neon_shape rs)
14131 {
14132 if (rs == NS_FFF || rs == NS_HHH)
14133 {
14134 do_vfp_nsyn_opcode ("fmuls");
14135
14136 /* ARMv8.2 fp16 instruction. */
14137 if (rs == NS_HHH)
14138 do_scalar_fp16_v82_encode ();
14139 }
14140 else
14141 do_vfp_nsyn_opcode ("fmuld");
14142 }
14143
14144 static void
14145 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14146 {
14147 int is_neg = (inst.instruction & 0x80) != 0;
14148 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14149
14150 if (rs == NS_FF || rs == NS_HH)
14151 {
14152 if (is_neg)
14153 do_vfp_nsyn_opcode ("fnegs");
14154 else
14155 do_vfp_nsyn_opcode ("fabss");
14156
14157 /* ARMv8.2 fp16 instruction. */
14158 if (rs == NS_HH)
14159 do_scalar_fp16_v82_encode ();
14160 }
14161 else
14162 {
14163 if (is_neg)
14164 do_vfp_nsyn_opcode ("fnegd");
14165 else
14166 do_vfp_nsyn_opcode ("fabsd");
14167 }
14168 }
14169
14170 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14171 insns belong to Neon, and are handled elsewhere. */
14172
14173 static void
14174 do_vfp_nsyn_ldm_stm (int is_dbmode)
14175 {
14176 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14177 if (is_ldm)
14178 {
14179 if (is_dbmode)
14180 do_vfp_nsyn_opcode ("fldmdbs");
14181 else
14182 do_vfp_nsyn_opcode ("fldmias");
14183 }
14184 else
14185 {
14186 if (is_dbmode)
14187 do_vfp_nsyn_opcode ("fstmdbs");
14188 else
14189 do_vfp_nsyn_opcode ("fstmias");
14190 }
14191 }
14192
14193 static void
14194 do_vfp_nsyn_sqrt (void)
14195 {
14196 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14197 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14198
14199 if (rs == NS_FF || rs == NS_HH)
14200 {
14201 do_vfp_nsyn_opcode ("fsqrts");
14202
14203 /* ARMv8.2 fp16 instruction. */
14204 if (rs == NS_HH)
14205 do_scalar_fp16_v82_encode ();
14206 }
14207 else
14208 do_vfp_nsyn_opcode ("fsqrtd");
14209 }
14210
14211 static void
14212 do_vfp_nsyn_div (void)
14213 {
14214 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14215 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14216 N_F_ALL | N_KEY | N_VFP);
14217
14218 if (rs == NS_FFF || rs == NS_HHH)
14219 {
14220 do_vfp_nsyn_opcode ("fdivs");
14221
14222 /* ARMv8.2 fp16 instruction. */
14223 if (rs == NS_HHH)
14224 do_scalar_fp16_v82_encode ();
14225 }
14226 else
14227 do_vfp_nsyn_opcode ("fdivd");
14228 }
14229
14230 static void
14231 do_vfp_nsyn_nmul (void)
14232 {
14233 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14234 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14235 N_F_ALL | N_KEY | N_VFP);
14236
14237 if (rs == NS_FFF || rs == NS_HHH)
14238 {
14239 NEON_ENCODE (SINGLE, inst);
14240 do_vfp_sp_dyadic ();
14241
14242 /* ARMv8.2 fp16 instruction. */
14243 if (rs == NS_HHH)
14244 do_scalar_fp16_v82_encode ();
14245 }
14246 else
14247 {
14248 NEON_ENCODE (DOUBLE, inst);
14249 do_vfp_dp_rd_rn_rm ();
14250 }
14251 do_vfp_cond_or_thumb ();
14252
14253 }
14254
/* Encode a Neon-syntax VCMP/VCMPE as a VFP compare.  A register second
   operand selects the two-register compare; otherwise the immediate form
   is rewritten to the compare-with-zero mnemonic.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against an immediate (which must be zero): retarget the
	 mnemonic to the vcmpz/vcmpez variant.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14309
14310 static void
14311 nsyn_insert_sp (void)
14312 {
14313 inst.operands[1] = inst.operands[0];
14314 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14315 inst.operands[0].reg = REG_SP;
14316 inst.operands[0].isreg = 1;
14317 inst.operands[0].writeback = 1;
14318 inst.operands[0].present = 1;
14319 }
14320
14321 static void
14322 do_vfp_nsyn_push (void)
14323 {
14324 nsyn_insert_sp ();
14325 if (inst.operands[1].issingle)
14326 do_vfp_nsyn_opcode ("fstmdbs");
14327 else
14328 do_vfp_nsyn_opcode ("fstmdbd");
14329 }
14330
14331 static void
14332 do_vfp_nsyn_pop (void)
14333 {
14334 nsyn_insert_sp ();
14335 if (inst.operands[1].issingle)
14336 do_vfp_nsyn_opcode ("fldmias");
14337 else
14338 do_vfp_nsyn_opcode ("fldmiad");
14339 }
14340
14341 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14342 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14343
14344 static void
14345 neon_dp_fixup (struct arm_it* insn)
14346 {
14347 unsigned int i = insn->instruction;
14348 insn->is_neon = 1;
14349
14350 if (thumb_mode)
14351 {
14352 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14353 if (i & (1 << 24))
14354 i |= 1 << 28;
14355
14356 i &= ~(1 << 24);
14357
14358 i |= 0xef000000;
14359 }
14360 else
14361 i |= 0xf2000000;
14362
14363 insn->instruction = i;
14364 }
14365
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit, so the
     power-of-two sizes 8/16/32/64 map to 4/5/6/7; subtract 4.  */
  const int lowest_set = ffs (x);

  return (unsigned) (lowest_set - 4);
}
14374
/* Split a 5-bit Neon register number into the low four bits and the high
   bit, as placed in separate fields of the instruction encoding.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14377
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd/D fields from operand 0.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn/N fields from operand 1.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm/M fields from operand 2.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14402
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd/D fields from operand 0.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rm/M fields from operand 1.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14425
14426 /* Neon instruction encoders, in approximate order of appearance. */
14427
14428 static void
14429 do_neon_dyadic_i_su (void)
14430 {
14431 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14432 struct neon_type_el et = neon_check_type (3, rs,
14433 N_EQK, N_EQK, N_SU_32 | N_KEY);
14434 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14435 }
14436
14437 static void
14438 do_neon_dyadic_i64_su (void)
14439 {
14440 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14441 struct neon_type_el et = neon_check_type (3, rs,
14442 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14443 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14444 }
14445
/* Encode a two-register immediate-shift Neon instruction.  WRITE_UBIT says
   whether the U bit should be written at all; UVAL is its value.  ISQUAD
   selects the Q form, ET is the checked element type, and IMMBITS the
   already-encoded shift amount.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes; split below across the L bit and size field.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Bit 7 (L) distinguishes the 64-bit element forms.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14464
/* Encode VSHL, in both its immediate-shift and register-shift forms.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: shift amount must be less than the element size.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14500
/* Encode VQSHL (saturating shift left), immediate and register forms.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form: shift amount must be less than the element size.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14530
14531 static void
14532 do_neon_rshl (void)
14533 {
14534 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14535 struct neon_type_el et = neon_check_type (3, rs,
14536 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14537 unsigned int tmp;
14538
14539 tmp = inst.operands[2].reg;
14540 inst.operands[2].reg = inst.operands[1].reg;
14541 inst.operands[1].reg = tmp;
14542 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14543 }
14544
/* Find the cmode encoding for IMMEDIATE when used with a logic immediate
   instruction of element size SIZE, storing the encoded immediate byte in
   *IMMBITS.  Returns the cmode value, or FAIL (after reporting an error)
   if the immediate cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit elements: the immediate must fit in one byte of the word;
	 the cmode selects which byte.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise try to treat the value as a repeated 16-bit pattern and
	 fall through to the 16-bit cases below.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: one byte of the halfword, selected by the cmode.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14600
14601 static void
14602 do_neon_logic (void)
14603 {
14604 if (inst.operands[2].present && inst.operands[2].isreg)
14605 {
14606 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14607 neon_check_type (3, rs, N_IGNORE_TYPE);
14608 /* U bit and size field were set as part of the bitmask. */
14609 NEON_ENCODE (INTEGER, inst);
14610 neon_three_same (neon_quad (rs), 0, -1);
14611 }
14612 else
14613 {
14614 const int three_ops_form = (inst.operands[2].present
14615 && !inst.operands[2].isreg);
14616 const int immoperand = (three_ops_form ? 2 : 1);
14617 enum neon_shape rs = (three_ops_form
14618 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14619 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14620 struct neon_type_el et = neon_check_type (2, rs,
14621 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14622 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14623 unsigned immbits;
14624 int cmode;
14625
14626 if (et.type == NT_invtype)
14627 return;
14628
14629 if (three_ops_form)
14630 constraint (inst.operands[0].reg != inst.operands[1].reg,
14631 _("first and second operands shall be the same register"));
14632
14633 NEON_ENCODE (IMMED, inst);
14634
14635 immbits = inst.operands[immoperand].imm;
14636 if (et.size == 64)
14637 {
14638 /* .i64 is a pseudo-op, so the immediate must be a repeating
14639 pattern. */
14640 if (immbits != (inst.operands[immoperand].regisimm ?
14641 inst.operands[immoperand].reg : 0))
14642 {
14643 /* Set immbits to an invalid constant. */
14644 immbits = 0xdeadbeef;
14645 }
14646 }
14647
14648 switch (opcode)
14649 {
14650 case N_MNEM_vbic:
14651 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14652 break;
14653
14654 case N_MNEM_vorr:
14655 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14656 break;
14657
14658 case N_MNEM_vand:
14659 /* Pseudo-instruction for VBIC. */
14660 neon_invert_size (&immbits, 0, et.size);
14661 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14662 break;
14663
14664 case N_MNEM_vorn:
14665 /* Pseudo-instruction for VORR. */
14666 neon_invert_size (&immbits, 0, et.size);
14667 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14668 break;
14669
14670 default:
14671 abort ();
14672 }
14673
14674 if (cmode == FAIL)
14675 return;
14676
14677 inst.instruction |= neon_quad (rs) << 6;
14678 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14679 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14680 inst.instruction |= cmode << 8;
14681 neon_write_immbits (immbits);
14682
14683 neon_dp_fixup (&inst);
14684 }
14685 }
14686
14687 static void
14688 do_neon_bitfield (void)
14689 {
14690 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14691 neon_check_type (3, rs, N_IGNORE_TYPE);
14692 neon_three_same (neon_quad (rs), 0, -1);
14693 }
14694
14695 static void
14696 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14697 unsigned destbits)
14698 {
14699 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14700 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14701 types | N_KEY);
14702 if (et.type == NT_float)
14703 {
14704 NEON_ENCODE (FLOAT, inst);
14705 neon_three_same (neon_quad (rs), 0, -1);
14706 }
14707 else
14708 {
14709 NEON_ENCODE (INTEGER, inst);
14710 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14711 }
14712 }
14713
static void
do_neon_dyadic_if_su (void)
{
  /* Three-same operation on any signed/unsigned/float type up to 32 bits
     (N_SUF_32); the U bit is set for unsigned element types.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14719
static void
do_neon_dyadic_if_su_d (void)
{
  /* As do_neon_dyadic_if_su.  This version only allows D registers, but
     that constraint is enforced during operand parsing so we don't need
     to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14727
static void
do_neon_dyadic_if_i_d (void)
{
  /* Integer-typed (N_IF_32) three-same operation.  The "untyped" case can't
     happen.  Do this to stop the "U" bit being affected if we specify
     unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14735
/* Bitmask values for vfp_or_neon_is_neon's CHECK argument.  */
enum vfp_or_neon_is_neon_bits
{
  /* Check/patch the condition field (Neon insns are unconditional in ARM
     state).  */
  NEON_CHECK_CC = 1,
  /* Require the base Neon extension (fpu_neon_ext_v1).  */
  NEON_CHECK_ARCH = 2,
  /* Require the ARMv8 Neon extension (fpu_neon_ext_armv8).  */
  NEON_CHECK_ARCH8 = 4
};
14742
14743 /* Call this function if an instruction which may have belonged to the VFP or
14744 Neon instruction sets, but turned out to be a Neon instruction (due to the
14745 operand types involved, etc.). We have to check and/or fix-up a couple of
14746 things:
14747
14748 - Make sure the user hasn't attempted to make a Neon instruction
14749 conditional.
14750 - Alter the value in the condition code field if necessary.
14751 - Make sure that the arch supports Neon instructions.
14752
14753 Which of these operations take place depends on bits from enum
14754 vfp_or_neon_is_neon_bits.
14755
14756 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14757 current instruction's condition is COND_ALWAYS, the condition field is
14758 changed to inst.uncond_value. This is necessary because instructions shared
14759 between VFP and Neon may be conditional for the VFP variants only, and the
14760 unconditional Neon version must have, e.g., 0xF in the condition field. */
14761
14762 static int
14763 vfp_or_neon_is_neon (unsigned check)
14764 {
14765 /* Conditions are always legal in Thumb mode (IT blocks). */
14766 if (!thumb_mode && (check & NEON_CHECK_CC))
14767 {
14768 if (inst.cond != COND_ALWAYS)
14769 {
14770 first_error (_(BAD_COND));
14771 return FAIL;
14772 }
14773 if (inst.uncond_value != -1)
14774 inst.instruction |= inst.uncond_value << 28;
14775 }
14776
14777 if ((check & NEON_CHECK_ARCH)
14778 && !mark_feature_used (&fpu_neon_ext_v1))
14779 {
14780 first_error (_(BAD_FPU));
14781 return FAIL;
14782 }
14783
14784 if ((check & NEON_CHECK_ARCH8)
14785 && !mark_feature_used (&fpu_neon_ext_armv8))
14786 {
14787 first_error (_(BAD_FPU));
14788 return FAIL;
14789 }
14790
14791 return SUCCESS;
14792 }
14793
static void
do_neon_addsub_if_i (void)
{
  /* Add/subtract: try the VFP (scalar) encoding first; on success the
     instruction is fully encoded.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14807
14808 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14809 result to be:
14810 V<op> A,B (A is operand 0, B is operand 2)
14811 to mean:
14812 V<op> A,B,A
14813 not:
14814 V<op> A,B,B
14815 so handle that case specially. */
14816
14817 static void
14818 neon_exchange_operands (void)
14819 {
14820 void *scratch = alloca (sizeof (inst.operands[0]));
14821 if (inst.operands[1].present)
14822 {
14823 /* Swap operands[1] and operands[2]. */
14824 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14825 inst.operands[1] = inst.operands[2];
14826 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14827 }
14828 else
14829 {
14830 inst.operands[1] = inst.operands[2];
14831 inst.operands[2] = inst.operands[0];
14832 }
14833 }
14834
14835 static void
14836 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14837 {
14838 if (inst.operands[2].isreg)
14839 {
14840 if (invert)
14841 neon_exchange_operands ();
14842 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14843 }
14844 else
14845 {
14846 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14847 struct neon_type_el et = neon_check_type (2, rs,
14848 N_EQK | N_SIZ, immtypes | N_KEY);
14849
14850 NEON_ENCODE (IMMED, inst);
14851 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14852 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14853 inst.instruction |= LOW4 (inst.operands[1].reg);
14854 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14855 inst.instruction |= neon_quad (rs) << 6;
14856 inst.instruction |= (et.type == NT_float) << 10;
14857 inst.instruction |= neon_logbits (et.size) << 18;
14858
14859 neon_dp_fixup (&inst);
14860 }
14861 }
14862
static void
do_neon_cmp (void)
{
  /* Non-inverted comparison: operands are used in the order written.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
14868
static void
do_neon_cmp_inv (void)
{
  /* Inverted comparison: neon_compare exchanges the source operands for
     the register form.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
14874
static void
do_neon_ceq (void)
{
  /* Equality compare: integer or float 32-bit-or-narrower element types
     for both register and immediate-zero forms.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14880
14881 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14882 scalars, which are encoded in 5 bits, M : Rm.
14883 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14884 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14885 index in M. */
14886
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Anything else (including unsupported element sizes) is out of range.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14912
14913 /* Encode multiply / multiply-accumulate scalar instructions. */
14914
14915 static void
14916 neon_mul_mac (struct neon_type_el et, int ubit)
14917 {
14918 unsigned scalar;
14919
14920 /* Give a more helpful error message if we have an invalid type. */
14921 if (et.type == NT_invtype)
14922 return;
14923
14924 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14925 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14926 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14927 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14928 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14929 inst.instruction |= LOW4 (scalar);
14930 inst.instruction |= HI1 (scalar) << 5;
14931 inst.instruction |= (et.type == NT_float) << 8;
14932 inst.instruction |= neon_logbits (et.size) << 20;
14933 inst.instruction |= (ubit != 0) << 24;
14934
14935 neon_dp_fixup (&inst);
14936 }
14937
14938 static void
14939 do_neon_mac_maybe_scalar (void)
14940 {
14941 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14942 return;
14943
14944 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14945 return;
14946
14947 if (inst.operands[2].isscalar)
14948 {
14949 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14950 struct neon_type_el et = neon_check_type (3, rs,
14951 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14952 NEON_ENCODE (SCALAR, inst);
14953 neon_mul_mac (et, neon_quad (rs));
14954 }
14955 else
14956 {
14957 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14958 affected if we specify unsigned args. */
14959 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14960 }
14961 }
14962
static void
do_neon_fmac (void)
{
  /* Fused multiply-accumulate: try the VFP encoding first (see
     do_vfp_nsyn_fma_fms); otherwise fall back to the Neon three-same
     float encoding.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14974
14975 static void
14976 do_neon_tst (void)
14977 {
14978 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14979 struct neon_type_el et = neon_check_type (3, rs,
14980 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14981 neon_three_same (neon_quad (rs), 0, et.size);
14982 }
14983
14984 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14985 same types as the MAC equivalents. The polynomial type for this instruction
14986 is encoded the same as the integer type. */
14987
static void
do_neon_mul (void)
{
  /* Try the scalar VFP multiply first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* By-scalar forms share the MAC encoding path; otherwise NT_poly marks
     the P8 polynomial type, which encodes like the integer types.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
15002
15003 static void
15004 do_neon_qdmulh (void)
15005 {
15006 if (inst.operands[2].isscalar)
15007 {
15008 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15009 struct neon_type_el et = neon_check_type (3, rs,
15010 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15011 NEON_ENCODE (SCALAR, inst);
15012 neon_mul_mac (et, neon_quad (rs));
15013 }
15014 else
15015 {
15016 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15017 struct neon_type_el et = neon_check_type (3, rs,
15018 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15019 NEON_ENCODE (INTEGER, inst);
15020 /* The U bit (rounding) comes from bit mask. */
15021 neon_three_same (neon_quad (rs), 0, et.size);
15022 }
15023 }
15024
15025 static void
15026 do_neon_fcmp_absolute (void)
15027 {
15028 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15029 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
15030 /* Size field comes from bit mask. */
15031 neon_three_same (neon_quad (rs), 1, -1);
15032 }
15033
static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted absolute compare: exchange the source operands and use the
     non-inverted encoding.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15040
15041 static void
15042 do_neon_step (void)
15043 {
15044 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15045 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
15046 neon_three_same (neon_quad (rs), 0, -1);
15047 }
15048
15049 static void
15050 do_neon_abs_neg (void)
15051 {
15052 enum neon_shape rs;
15053 struct neon_type_el et;
15054
15055 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15056 return;
15057
15058 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15059 return;
15060
15061 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15062 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
15063
15064 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15065 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15066 inst.instruction |= LOW4 (inst.operands[1].reg);
15067 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15068 inst.instruction |= neon_quad (rs) << 6;
15069 inst.instruction |= (et.type == NT_float) << 10;
15070 inst.instruction |= neon_logbits (et.size) << 18;
15071
15072 neon_dp_fixup (&inst);
15073 }
15074
15075 static void
15076 do_neon_sli (void)
15077 {
15078 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15079 struct neon_type_el et = neon_check_type (2, rs,
15080 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15081 int imm = inst.operands[2].imm;
15082 constraint (imm < 0 || (unsigned)imm >= et.size,
15083 _("immediate out of range for insert"));
15084 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15085 }
15086
15087 static void
15088 do_neon_sri (void)
15089 {
15090 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15091 struct neon_type_el et = neon_check_type (2, rs,
15092 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15093 int imm = inst.operands[2].imm;
15094 constraint (imm < 1 || (unsigned)imm > et.size,
15095 _("immediate out of range for insert"));
15096 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15097 }
15098
15099 static void
15100 do_neon_qshlu_imm (void)
15101 {
15102 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15103 struct neon_type_el et = neon_check_type (2, rs,
15104 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15105 int imm = inst.operands[2].imm;
15106 constraint (imm < 0 || (unsigned)imm >= et.size,
15107 _("immediate out of range for shift"));
15108 /* Only encodes the 'U present' variant of the instruction.
15109 In this case, signed types have OP (bit 8) set to 0.
15110 Unsigned types have OP set to 1. */
15111 inst.instruction |= (et.type == NT_unsigned) << 8;
15112 /* The rest of the bits are the same as other immediate shifts. */
15113 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15114 }
15115
15116 static void
15117 do_neon_qmovn (void)
15118 {
15119 struct neon_type_el et = neon_check_type (2, NS_DQ,
15120 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15121 /* Saturating move where operands can be signed or unsigned, and the
15122 destination has the same signedness. */
15123 NEON_ENCODE (INTEGER, inst);
15124 if (et.type == NT_unsigned)
15125 inst.instruction |= 0xc0;
15126 else
15127 inst.instruction |= 0x80;
15128 neon_two_same (0, 1, et.size / 2);
15129 }
15130
15131 static void
15132 do_neon_qmovun (void)
15133 {
15134 struct neon_type_el et = neon_check_type (2, NS_DQ,
15135 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15136 /* Saturating move with unsigned results. Operands must be signed. */
15137 NEON_ENCODE (INTEGER, inst);
15138 neon_two_same (0, 1, et.size / 2);
15139 }
15140
15141 static void
15142 do_neon_rshift_sat_narrow (void)
15143 {
15144 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15145 or unsigned. If operands are unsigned, results must also be unsigned. */
15146 struct neon_type_el et = neon_check_type (2, NS_DQI,
15147 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15148 int imm = inst.operands[2].imm;
15149 /* This gets the bounds check, size encoding and immediate bits calculation
15150 right. */
15151 et.size /= 2;
15152
15153 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15154 VQMOVN.I<size> <Dd>, <Qm>. */
15155 if (imm == 0)
15156 {
15157 inst.operands[2].present = 0;
15158 inst.instruction = N_MNEM_vqmovn;
15159 do_neon_qmovn ();
15160 return;
15161 }
15162
15163 constraint (imm < 1 || (unsigned)imm > et.size,
15164 _("immediate out of range"));
15165 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15166 }
15167
15168 static void
15169 do_neon_rshift_sat_narrow_u (void)
15170 {
15171 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15172 or unsigned. If operands are unsigned, results must also be unsigned. */
15173 struct neon_type_el et = neon_check_type (2, NS_DQI,
15174 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15175 int imm = inst.operands[2].imm;
15176 /* This gets the bounds check, size encoding and immediate bits calculation
15177 right. */
15178 et.size /= 2;
15179
15180 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15181 VQMOVUN.I<size> <Dd>, <Qm>. */
15182 if (imm == 0)
15183 {
15184 inst.operands[2].present = 0;
15185 inst.instruction = N_MNEM_vqmovun;
15186 do_neon_qmovun ();
15187 return;
15188 }
15189
15190 constraint (imm < 1 || (unsigned)imm > et.size,
15191 _("immediate out of range"));
15192 /* FIXME: The manual is kind of unclear about what value U should have in
15193 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15194 must be 1. */
15195 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15196 }
15197
15198 static void
15199 do_neon_movn (void)
15200 {
15201 struct neon_type_el et = neon_check_type (2, NS_DQ,
15202 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15203 NEON_ENCODE (INTEGER, inst);
15204 neon_two_same (0, 1, et.size / 2);
15205 }
15206
15207 static void
15208 do_neon_rshift_narrow (void)
15209 {
15210 struct neon_type_el et = neon_check_type (2, NS_DQI,
15211 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15212 int imm = inst.operands[2].imm;
15213 /* This gets the bounds check, size encoding and immediate bits calculation
15214 right. */
15215 et.size /= 2;
15216
15217 /* If immediate is zero then we are a pseudo-instruction for
15218 VMOVN.I<size> <Dd>, <Qm> */
15219 if (imm == 0)
15220 {
15221 inst.operands[2].present = 0;
15222 inst.instruction = N_MNEM_vmovn;
15223 do_neon_movn ();
15224 return;
15225 }
15226
15227 constraint (imm < 1 || (unsigned)imm > et.size,
15228 _("immediate out of range for narrowing operation"));
15229 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15230 }
15231
15232 static void
15233 do_neon_shll (void)
15234 {
15235 /* FIXME: Type checking when lengthening. */
15236 struct neon_type_el et = neon_check_type (2, NS_QDI,
15237 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15238 unsigned imm = inst.operands[2].imm;
15239
15240 if (imm == et.size)
15241 {
15242 /* Maximum shift variant. */
15243 NEON_ENCODE (INTEGER, inst);
15244 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15245 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15246 inst.instruction |= LOW4 (inst.operands[1].reg);
15247 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15248 inst.instruction |= neon_logbits (et.size) << 18;
15249
15250 neon_dp_fixup (&inst);
15251 }
15252 else
15253 {
15254 /* A more-specific type check for non-max versions. */
15255 et = neon_check_type (2, NS_QDI,
15256 N_EQK | N_DBL, N_SU_32 | N_KEY);
15257 NEON_ENCODE (IMMED, inst);
15258 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15259 }
15260 }
15261
15262 /* Check the various types for the VCVT instruction, and return which version
15263 the current instruction is. */
15264
15265 #define CVT_FLAVOUR_VAR \
15266 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15267 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15268 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15269 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15270 /* Half-precision conversions. */ \
15271 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15272 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15273 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
15274 Compared with single/double precision variants, only the co-processor \
15275 field is different, so the encoding flow is reused here. */ \
15276 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
15277 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
15278 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15279 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15280 /* VFP instructions. */ \
15281 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15282 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15283 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15284 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15285 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15286 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15287 /* VFP instructions with bitshift. */ \
15288 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15289 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15290 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15291 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15292 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15293 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15294 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15295 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15296
/* Expand CVT_FLAVOUR_VAR into one enumerator per conversion flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 onwards are VFP rather than Neon conversions
     (see the "VFP instructions" section of CVT_FLAVOUR_VAR).  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15309
/* Determine which conversion flavour the current instruction performs:
   each flavour from CVT_FLAVOUR_VAR is probed in declaration order and the
   first whose operand types match is returned; neon_cvt_flavour_invalid
   if none match.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Probe one flavour: type-check the operands against (R|X, R|Y); on a
     match, clear the error recorded by failed earlier probes and return the
     flavour's enumerator.  ET, WHOLE_REG and KEY below are referenced from
     inside the expansion.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  /* Expands to one probe per flavour.  */
  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15335
/* Rounding-mode / variant suffix of a VCVT-family instruction.  Modes
   a/n/p/m map directly to RM field values 0-3 in do_vfp_nsyn_cvt_fpv8;
   z selects round-towards-zero (see do_vfp_nsyn_cvtz).  Modes x and r are
   further variants handled by the callers (semantics not visible here).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15346
15347 /* Neon-syntax VFP conversions. */
15348
15349 static void
15350 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15351 {
15352 const char *opname = 0;
15353
15354 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
15355 || rs == NS_FHI || rs == NS_HFI)
15356 {
15357 /* Conversions with immediate bitshift. */
15358 const char *enc[] =
15359 {
15360 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15361 CVT_FLAVOUR_VAR
15362 NULL
15363 #undef CVT_VAR
15364 };
15365
15366 if (flavour < (int) ARRAY_SIZE (enc))
15367 {
15368 opname = enc[flavour];
15369 constraint (inst.operands[0].reg != inst.operands[1].reg,
15370 _("operands 0 and 1 must be the same register"));
15371 inst.operands[1] = inst.operands[2];
15372 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15373 }
15374 }
15375 else
15376 {
15377 /* Conversions without bitshift. */
15378 const char *enc[] =
15379 {
15380 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15381 CVT_FLAVOUR_VAR
15382 NULL
15383 #undef CVT_VAR
15384 };
15385
15386 if (flavour < (int) ARRAY_SIZE (enc))
15387 opname = enc[flavour];
15388 }
15389
15390 if (opname)
15391 do_vfp_nsyn_opcode (opname);
15392
15393 /* ARMv8.2 fp16 VCVT instruction. */
15394 if (flavour == neon_cvt_flavour_s32_f16
15395 || flavour == neon_cvt_flavour_u32_f16
15396 || flavour == neon_cvt_flavour_f16_u32
15397 || flavour == neon_cvt_flavour_f16_s32)
15398 do_scalar_fp16_v82_encode ();
15399 }
15400
15401 static void
15402 do_vfp_nsyn_cvtz (void)
15403 {
15404 enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
15405 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15406 const char *enc[] =
15407 {
15408 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15409 CVT_FLAVOUR_VAR
15410 NULL
15411 #undef CVT_VAR
15412 };
15413
15414 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15415 do_vfp_nsyn_opcode (enc[flavour]);
15416 }
15417
15418 static void
15419 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15420 enum neon_cvt_mode mode)
15421 {
15422 int sz, op;
15423 int rm;
15424
15425 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15426 D register operands. */
15427 if (flavour == neon_cvt_flavour_s32_f64
15428 || flavour == neon_cvt_flavour_u32_f64)
15429 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15430 _(BAD_FPU));
15431
15432 if (flavour == neon_cvt_flavour_s32_f16
15433 || flavour == neon_cvt_flavour_u32_f16)
15434 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15435 _(BAD_FP16));
15436
15437 set_it_insn_type (OUTSIDE_IT_INSN);
15438
15439 switch (flavour)
15440 {
15441 case neon_cvt_flavour_s32_f64:
15442 sz = 1;
15443 op = 1;
15444 break;
15445 case neon_cvt_flavour_s32_f32:
15446 sz = 0;
15447 op = 1;
15448 break;
15449 case neon_cvt_flavour_s32_f16:
15450 sz = 0;
15451 op = 1;
15452 break;
15453 case neon_cvt_flavour_u32_f64:
15454 sz = 1;
15455 op = 0;
15456 break;
15457 case neon_cvt_flavour_u32_f32:
15458 sz = 0;
15459 op = 0;
15460 break;
15461 case neon_cvt_flavour_u32_f16:
15462 sz = 0;
15463 op = 0;
15464 break;
15465 default:
15466 first_error (_("invalid instruction shape"));
15467 return;
15468 }
15469
15470 switch (mode)
15471 {
15472 case neon_cvt_mode_a: rm = 0; break;
15473 case neon_cvt_mode_n: rm = 1; break;
15474 case neon_cvt_mode_p: rm = 2; break;
15475 case neon_cvt_mode_m: rm = 3; break;
15476 default: first_error (_("invalid rounding mode")); return;
15477 }
15478
15479 NEON_ENCODE (FPV8, inst);
15480 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15481 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15482 inst.instruction |= sz << 8;
15483
15484 /* ARMv8.2 fp16 VCVT instruction. */
15485 if (flavour == neon_cvt_flavour_s32_f16
15486 ||flavour == neon_cvt_flavour_u32_f16)
15487 do_scalar_fp16_v82_encode ();
15488 inst.instruction |= op << 7;
15489 inst.instruction |= rm << 16;
15490 inst.instruction |= 0xf0000000;
15491 inst.is_neon = TRUE;
15492 }
15493
15494 static void
15495 do_neon_cvt_1 (enum neon_cvt_mode mode)
15496 {
15497 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15498 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
15499 NS_FH, NS_HF, NS_FHI, NS_HFI,
15500 NS_NULL);
15501 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15502
15503 /* PR11109: Handle round-to-zero for VCVT conversions. */
15504 if (mode == neon_cvt_mode_z
15505 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15506 && (flavour == neon_cvt_flavour_s32_f32
15507 || flavour == neon_cvt_flavour_u32_f32
15508 || flavour == neon_cvt_flavour_s32_f64
15509 || flavour == neon_cvt_flavour_u32_f64)
15510 && (rs == NS_FD || rs == NS_FF))
15511 {
15512 do_vfp_nsyn_cvtz ();
15513 return;
15514 }
15515
15516 /* ARMv8.2 fp16 VCVT conversions. */
15517 if (mode == neon_cvt_mode_z
15518 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
15519 && (flavour == neon_cvt_flavour_s32_f16
15520 || flavour == neon_cvt_flavour_u32_f16)
15521 && (rs == NS_FH))
15522 {
15523 do_vfp_nsyn_cvtz ();
15524 do_scalar_fp16_v82_encode ();
15525 return;
15526 }
15527
15528 /* VFP rather than Neon conversions. */
15529 if (flavour >= neon_cvt_flavour_first_fp)
15530 {
15531 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15532 do_vfp_nsyn_cvt (rs, flavour);
15533 else
15534 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15535
15536 return;
15537 }
15538
15539 switch (rs)
15540 {
15541 case NS_DDI:
15542 case NS_QQI:
15543 {
15544 unsigned immbits;
15545 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15546
15547 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15548 return;
15549
15550 /* Fixed-point conversion with #0 immediate is encoded as an
15551 integer conversion. */
15552 if (inst.operands[2].present && inst.operands[2].imm == 0)
15553 goto int_encode;
15554 immbits = 32 - inst.operands[2].imm;
15555 NEON_ENCODE (IMMED, inst);
15556 if (flavour != neon_cvt_flavour_invalid)
15557 inst.instruction |= enctab[flavour];
15558 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15559 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15560 inst.instruction |= LOW4 (inst.operands[1].reg);
15561 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15562 inst.instruction |= neon_quad (rs) << 6;
15563 inst.instruction |= 1 << 21;
15564 inst.instruction |= immbits << 16;
15565
15566 neon_dp_fixup (&inst);
15567 }
15568 break;
15569
15570 case NS_DD:
15571 case NS_QQ:
15572 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15573 {
15574 NEON_ENCODE (FLOAT, inst);
15575 set_it_insn_type (OUTSIDE_IT_INSN);
15576
15577 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15578 return;
15579
15580 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15581 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15582 inst.instruction |= LOW4 (inst.operands[1].reg);
15583 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15584 inst.instruction |= neon_quad (rs) << 6;
15585 inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
15586 inst.instruction |= mode << 8;
15587 if (thumb_mode)
15588 inst.instruction |= 0xfc000000;
15589 else
15590 inst.instruction |= 0xf0000000;
15591 }
15592 else
15593 {
15594 int_encode:
15595 {
15596 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
15597
15598 NEON_ENCODE (INTEGER, inst);
15599
15600 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15601 return;
15602
15603 if (flavour != neon_cvt_flavour_invalid)
15604 inst.instruction |= enctab[flavour];
15605
15606 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15607 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15608 inst.instruction |= LOW4 (inst.operands[1].reg);
15609 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15610 inst.instruction |= neon_quad (rs) << 6;
15611 inst.instruction |= 2 << 18;
15612
15613 neon_dp_fixup (&inst);
15614 }
15615 }
15616 break;
15617
15618 /* Half-precision conversions for Advanced SIMD -- neon. */
15619 case NS_QD:
15620 case NS_DQ:
15621
15622 if ((rs == NS_DQ)
15623 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15624 {
15625 as_bad (_("operand size must match register width"));
15626 break;
15627 }
15628
15629 if ((rs == NS_QD)
15630 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15631 {
15632 as_bad (_("operand size must match register width"));
15633 break;
15634 }
15635
15636 if (rs == NS_DQ)
15637 inst.instruction = 0x3b60600;
15638 else
15639 inst.instruction = 0x3b60700;
15640
15641 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15642 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15643 inst.instruction |= LOW4 (inst.operands[1].reg);
15644 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15645 neon_dp_fixup (&inst);
15646 break;
15647
15648 default:
15649 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15650 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15651 do_vfp_nsyn_cvt (rs, flavour);
15652 else
15653 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15654 }
15655 }
15656
static void
do_neon_cvtr (void)
{
  /* VCVTR: convert using the rounding mode currently in FPSCR ("x" mode).  */
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15662
static void
do_neon_cvt (void)
{
  /* Plain VCVT: "z" mode (round towards zero for float->int forms).  */
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15668
static void
do_neon_cvta (void)
{
  /* VCVTA: ARMv8 directed-rounding conversion, mode "a".  */
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15674
static void
do_neon_cvtn (void)
{
  /* VCVTN: ARMv8 directed-rounding conversion, mode "n".  */
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15680
static void
do_neon_cvtp (void)
{
  /* VCVTP: ARMv8 directed-rounding conversion, mode "p".  */
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15686
static void
do_neon_cvtm (void)
{
  /* VCVTM: ARMv8 directed-rounding conversion, mode "m".  */
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15692
15693 static void
15694 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15695 {
15696 if (is_double)
15697 mark_feature_used (&fpu_vfp_ext_armv8);
15698
15699 encode_arm_vfp_reg (inst.operands[0].reg,
15700 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15701 encode_arm_vfp_reg (inst.operands[1].reg,
15702 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15703 inst.instruction |= to ? 0x10000 : 0;
15704 inst.instruction |= t ? 0x80 : 0;
15705 inst.instruction |= is_double ? 0x100 : 0;
15706 do_vfp_cond_or_thumb ();
15707 }
15708
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  /* Shared handler for VCVTB/VCVTT: conversions between half precision
     and single or double precision.  T is TRUE for VCVTT (top half of
     the half-precision register), FALSE for VCVTB (bottom half).  Each
     neon_check_type probe below identifies one direction; a failed probe
     leaves an error in inst.error which is cleared before trying the
     accepted encoding.  */
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* Single precision source, half precision destination.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Half precision source, single precision destination.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* Double precision source, half precision destination.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Half precision source, double precision destination.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15750
static void
do_neon_cvtb (void)
{
  /* VCVTB: use the bottom half of the half-precision register.  */
  do_neon_cvttb_1 (FALSE);
}
15756
15757
static void
do_neon_cvtt (void)
{
  /* VCVTT: use the top half of the half-precision register.  */
  do_neon_cvttb_1 (TRUE);
}
15763
static void
neon_move_immediate (void)
{
  /* Encode the immediate forms of VMOV/VMVN (vector destination).  The
     immediate is matched against the legal cmode encodings, flipping
     between VMOV and VMVN if the inverted value encodes where the
     original does not.  */
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* For a 64-bit immediate the parser leaves the high half in .reg and
     sets regisimm (see immhi below).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the possibly-flipped OP bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15815
15816 static void
15817 do_neon_mvn (void)
15818 {
15819 if (inst.operands[1].isreg)
15820 {
15821 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15822
15823 NEON_ENCODE (INTEGER, inst);
15824 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15825 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15826 inst.instruction |= LOW4 (inst.operands[1].reg);
15827 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15828 inst.instruction |= neon_quad (rs) << 6;
15829 }
15830 else
15831 {
15832 NEON_ENCODE (IMMED, inst);
15833 neon_move_immediate ();
15834 }
15835
15836 neon_dp_fixup (&inst);
15837 }
15838
15839 /* Encode instructions of form:
15840
15841 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15842 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15843
15844 static void
15845 neon_mixed_length (struct neon_type_el et, unsigned size)
15846 {
15847 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15848 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15849 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15850 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15851 inst.instruction |= LOW4 (inst.operands[2].reg);
15852 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15853 inst.instruction |= (et.type == NT_unsigned) << 24;
15854 inst.instruction |= neon_logbits (size) << 20;
15855
15856 neon_dp_fixup (&inst);
15857 }
15858
15859 static void
15860 do_neon_dyadic_long (void)
15861 {
15862 /* FIXME: Type checking for lengthening op. */
15863 struct neon_type_el et = neon_check_type (3, NS_QDD,
15864 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15865 neon_mixed_length (et, et.size);
15866 }
15867
15868 static void
15869 do_neon_abal (void)
15870 {
15871 struct neon_type_el et = neon_check_type (3, NS_QDD,
15872 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15873 neon_mixed_length (et, et.size);
15874 }
15875
15876 static void
15877 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15878 {
15879 if (inst.operands[2].isscalar)
15880 {
15881 struct neon_type_el et = neon_check_type (3, NS_QDS,
15882 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15883 NEON_ENCODE (SCALAR, inst);
15884 neon_mul_mac (et, et.type == NT_unsigned);
15885 }
15886 else
15887 {
15888 struct neon_type_el et = neon_check_type (3, NS_QDD,
15889 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15890 NEON_ENCODE (INTEGER, inst);
15891 neon_mixed_length (et, et.size);
15892 }
15893 }
15894
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* Long multiply(-accumulate); scalar form permits S16/S32/U16/U32,
     register form any signed/unsigned 8/16/32-bit type.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15900
15901 static void
15902 do_neon_dyadic_wide (void)
15903 {
15904 struct neon_type_el et = neon_check_type (3, NS_QQD,
15905 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15906 neon_mixed_length (et, et.size);
15907 }
15908
static void
do_neon_dyadic_narrow (void)
{
  /* Narrowing three-register operation: result elements are half the
     width of the source elements.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
15919
static void
do_neon_mul_sat_scalar_long (void)
{
  /* Saturating long multiply; signed 16/32-bit types only, for both the
     scalar and the register form.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15925
static void
do_neon_vmull (void)
{
  /* VMULL: long multiply with scalar, integer, and polynomial forms.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Fake the size so neon_mixed_length emits the 0b10 encoding.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15957
15958 static void
15959 do_neon_ext (void)
15960 {
15961 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15962 struct neon_type_el et = neon_check_type (3, rs,
15963 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15964 unsigned imm = (inst.operands[3].imm * et.size) / 8;
15965
15966 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15967 _("shift out of range"));
15968 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15969 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15970 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15971 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15972 inst.instruction |= LOW4 (inst.operands[2].reg);
15973 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15974 inst.instruction |= neon_quad (rs) << 6;
15975 inst.instruction |= imm << 8;
15976
15977 neon_dp_fixup (&inst);
15978 }
15979
15980 static void
15981 do_neon_rev (void)
15982 {
15983 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15984 struct neon_type_el et = neon_check_type (2, rs,
15985 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15986 unsigned op = (inst.instruction >> 7) & 3;
15987 /* N (width of reversed regions) is encoded as part of the bitmask. We
15988 extract it here to check the elements to be reversed are smaller.
15989 Otherwise we'd get a reserved instruction. */
15990 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15991 gas_assert (elsize != 0);
15992 constraint (et.size >= elsize,
15993 _("elements must be smaller than reversal region"));
15994 neon_two_same (neon_quad (rs), 1, et.size);
15995 }
15996
static void
do_neon_dup (void)
{
  /* VDUP: replicate a scalar or an ARM core register into every lane of
     a vector.  */
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* The scalar index (pre-shifted by the element size) and the size
	 marker are packed together starting at bit 16.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size selects the B/E bits of the encoding.  */
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16047
16048 /* VMOV has particularly many variations. It can be one of:
16049 0. VMOV<c><q> <Qd>, <Qm>
16050 1. VMOV<c><q> <Dd>, <Dm>
16051 (Register operations, which are VORR with Rm = Rn.)
16052 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16053 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16054 (Immediate loads.)
16055 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16056 (ARM register to scalar.)
16057 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16058 (Two ARM registers to vector.)
16059 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16060 (Scalar to ARM register.)
16061 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16062 (Vector to two ARM registers.)
16063 8. VMOV.F32 <Sd>, <Sm>
16064 9. VMOV.F64 <Dd>, <Dm>
16065 (VFP register moves.)
16066 10. VMOV.F32 <Sd>, #imm
16067 11. VMOV.F64 <Dd>, #imm
16068 (VFP float immediate load.)
16069 12. VMOV <Rd>, <Sm>
16070 (VFP single to ARM reg.)
16071 13. VMOV <Sd>, <Rm>
16072 (ARM reg to VFP single.)
16073 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16074 (Two ARM regs to two VFP singles.)
16075 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16076 (Two VFP singles to two ARM regs.)
16077
16078 These cases can be disambiguated using neon_select_shape, except cases 1/9
16079 and 3/11 which depend on the operand type too.
16080
16081 All the encoded bits are hardcoded by this function.
16082
16083 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16084 Cases 5, 7 may be used with VFPv2 and above.
16085
16086 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16087 can specify a type where it doesn't make sense to, and is ignored). */
16088
static void
do_neon_mov (void)
{
  /* Disambiguate the many VMOV variants (see the numbered list in the
     comment above) by operand shape, then by element type for the
     shapes shared between cases 1/9 and 3/11.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 is a VFP register copy.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rm = Rn: source register goes in both positions.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size is encoded in the opc1/opc2 bits, merged with the index.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size and signedness are encoded together, merged with the
	   scalar index.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16337
16338 static void
16339 do_neon_rshift_round_imm (void)
16340 {
16341 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16342 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16343 int imm = inst.operands[2].imm;
16344
16345 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16346 if (imm == 0)
16347 {
16348 inst.operands[2].present = 0;
16349 do_neon_mov ();
16350 return;
16351 }
16352
16353 constraint (imm < 1 || (unsigned)imm > et.size,
16354 _("immediate out of range for shift"));
16355 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16356 et.size - imm);
16357 }
16358
static void
do_neon_movhf (void)
{
  /* Half-precision VMOV between two H registers (ARMv8.2 fp16).  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  do_vfp_sp_monadic ();

  /* NOTE(review): 0xF in the condition nibble appears to select the
     unconditional encoding, matching the fp16 handling elsewhere in
     this file -- confirm against the output routines.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
16373
16374 static void
16375 do_neon_movl (void)
16376 {
16377 struct neon_type_el et = neon_check_type (2, NS_QD,
16378 N_EQK | N_DBL, N_SU_32 | N_KEY);
16379 unsigned sizebits = et.size >> 3;
16380 inst.instruction |= sizebits << 19;
16381 neon_two_same (0, et.type == NT_unsigned, -1);
16382 }
16383
16384 static void
16385 do_neon_trn (void)
16386 {
16387 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16388 struct neon_type_el et = neon_check_type (2, rs,
16389 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16390 NEON_ENCODE (INTEGER, inst);
16391 neon_two_same (neon_quad (rs), 1, et.size);
16392 }
16393
16394 static void
16395 do_neon_zip_uzp (void)
16396 {
16397 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16398 struct neon_type_el et = neon_check_type (2, rs,
16399 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16400 if (rs == NS_DD && et.size == 32)
16401 {
16402 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16403 inst.instruction = N_MNEM_vtrn;
16404 do_neon_trn ();
16405 return;
16406 }
16407 neon_two_same (neon_quad (rs), 1, et.size);
16408 }
16409
16410 static void
16411 do_neon_sat_abs_neg (void)
16412 {
16413 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16414 struct neon_type_el et = neon_check_type (2, rs,
16415 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16416 neon_two_same (neon_quad (rs), 1, et.size);
16417 }
16418
16419 static void
16420 do_neon_pair_long (void)
16421 {
16422 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16423 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16424 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16425 inst.instruction |= (et.type == NT_unsigned) << 7;
16426 neon_two_same (neon_quad (rs), 1, et.size);
16427 }
16428
16429 static void
16430 do_neon_recip_est (void)
16431 {
16432 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16433 struct neon_type_el et = neon_check_type (2, rs,
16434 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16435 inst.instruction |= (et.type == NT_float) << 8;
16436 neon_two_same (neon_quad (rs), 1, et.size);
16437 }
16438
16439 static void
16440 do_neon_cls (void)
16441 {
16442 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16443 struct neon_type_el et = neon_check_type (2, rs,
16444 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16445 neon_two_same (neon_quad (rs), 1, et.size);
16446 }
16447
16448 static void
16449 do_neon_clz (void)
16450 {
16451 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16452 struct neon_type_el et = neon_check_type (2, rs,
16453 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16454 neon_two_same (neon_quad (rs), 1, et.size);
16455 }
16456
16457 static void
16458 do_neon_cnt (void)
16459 {
16460 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16461 struct neon_type_el et = neon_check_type (2, rs,
16462 N_EQK | N_INT, N_8 | N_KEY);
16463 neon_two_same (neon_quad (rs), 1, et.size);
16464 }
16465
16466 static void
16467 do_neon_swp (void)
16468 {
16469 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16470 neon_two_same (neon_quad (rs), 1, -1);
16471 }
16472
16473 static void
16474 do_neon_tbl_tbx (void)
16475 {
16476 unsigned listlenbits;
16477 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16478
16479 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16480 {
16481 first_error (_("bad list length for table lookup"));
16482 return;
16483 }
16484
16485 listlenbits = inst.operands[1].imm - 1;
16486 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16487 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16488 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16489 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16490 inst.instruction |= LOW4 (inst.operands[2].reg);
16491 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16492 inst.instruction |= listlenbits << 8;
16493
16494 neon_dp_fixup (&inst);
16495 }
16496
static void
do_neon_ldm_stm (void)
{
  /* VLDM/VSTM for register lists.  Single-precision lists are handed
     off to the VFP pseudo-opcode encoder; the rest of this function
     encodes the double-precision forms.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Offset is in words; each D register transfers two words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16526
static void
do_neon_ldr_str (void)
{
  /* VLDR/VSTR: dispatch to the matching VFP pseudo-opcode by direction
     (bit 20 is the load bit) and register width.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16563
16564 /* "interleave" version also handles non-interleaving register VLD1/VST1
16565 instructions. */
16566
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map the alignment specifier (stored in the upper bits of the parsed
     immediate) to the two-bit align field; wider alignments are only
     legal for particular list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16632
16633 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16634 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16635 otherwise. The variable arguments are a list of pairs of legal (size, align)
16636 values, terminated with -1. */
16637
16638 static int
16639 neon_alignment_bit (int size, int align, int *do_align, ...)
16640 {
16641 va_list ap;
16642 int result = FAIL, thissize, thisalign;
16643
16644 if (!inst.operands[1].immisalign)
16645 {
16646 *do_align = 0;
16647 return SUCCESS;
16648 }
16649
16650 va_start (ap, do_align);
16651
16652 do
16653 {
16654 thissize = va_arg (ap, int);
16655 if (thissize == -1)
16656 break;
16657 thisalign = va_arg (ap, int);
16658
16659 if (size == thissize && align == thisalign)
16660 result = SUCCESS;
16661 }
16662 while (result != SUCCESS);
16663
16664 va_end (ap);
16665
16666 if (result == SUCCESS)
16667 *do_align = 1;
16668 else
16669 first_error (_("unsupported alignment for instruction"));
16670
16671 return result;
16672 }
16673
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  /* log2 of the element size; the lane and stride bits shift up as
     elements get larger.  */
  int logsize = neon_logbits (et.size);
  /* Requested address alignment in bits (top byte of the immediate).  */
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of elements in one 64-bit D register, i.e. the lane-index
     limit.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Validate the (size, alignment) pair for this <n> and encode the
     alignment field at bit 4.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      /* The single-lane VLD3/VST3 forms take no alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    /* 32-bit elements distinguish 64-bit from 128-bit alignment.  */
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16758
16759 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16760
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the provisional bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 encodes a two-register list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* The all-lanes VLD3 form takes no alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the otherwise
	   unused size encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Bit 4 is the alignment flag established by neon_alignment_bit.  */
  inst.instruction |= do_align << 4;
}
16832
16833 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16834 apart from bits [11:4]. */
16835
16836 static void
16837 do_neon_ldx_stx (void)
16838 {
16839 if (inst.operands[1].isreg)
16840 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16841
16842 switch (NEON_LANE (inst.operands[0].imm))
16843 {
16844 case NEON_INTERLEAVE_LANES:
16845 NEON_ENCODE (INTERLV, inst);
16846 do_neon_ld_st_interleave ();
16847 break;
16848
16849 case NEON_ALL_LANES:
16850 NEON_ENCODE (DUP, inst);
16851 if (inst.instruction == N_INV)
16852 {
16853 first_error ("only loads support such operands");
16854 break;
16855 }
16856 do_neon_ld_dup ();
16857 break;
16858
16859 default:
16860 NEON_ENCODE (LANE, inst);
16861 do_neon_ld_st_lane ();
16862 }
16863
16864 /* L bit comes from bit mask. */
16865 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16866 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16867 inst.instruction |= inst.operands[1].reg << 16;
16868
16869 if (inst.operands[1].postind)
16870 {
16871 int postreg = inst.operands[1].imm & 0xf;
16872 constraint (!inst.operands[1].immisreg,
16873 _("post-index must be a register"));
16874 constraint (postreg == 0xd || postreg == 0xf,
16875 _("bad register for post-index"));
16876 inst.instruction |= postreg;
16877 }
16878 else
16879 {
16880 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16881 constraint (inst.reloc.exp.X_op != O_constant
16882 || inst.reloc.exp.X_add_number != 0,
16883 BAD_ADDR_MODE);
16884
16885 if (inst.operands[1].writeback)
16886 {
16887 inst.instruction |= 0xd;
16888 }
16889 else
16890 inst.instruction |= 0xf;
16891 }
16892
16893 if (thumb_mode)
16894 inst.instruction |= 0xf9000000;
16895 else
16896 inst.instruction |= 0xf4000000;
16897 }
16898
16899 /* FP v8. */
16900 static void
16901 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16902 {
16903 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16904 D register operands. */
16905 if (neon_shape_class[rs] == SC_DOUBLE)
16906 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16907 _(BAD_FPU));
16908
16909 NEON_ENCODE (FPV8, inst);
16910
16911 if (rs == NS_FFF || rs == NS_HHH)
16912 {
16913 do_vfp_sp_dyadic ();
16914
16915 /* ARMv8.2 fp16 instruction. */
16916 if (rs == NS_HHH)
16917 do_scalar_fp16_v82_encode ();
16918 }
16919 else
16920 do_vfp_dp_rd_rn_rm ();
16921
16922 if (rs == NS_DDD)
16923 inst.instruction |= 0x100;
16924
16925 inst.instruction |= 0xf0000000;
16926 }
16927
16928 static void
16929 do_vsel (void)
16930 {
16931 set_it_insn_type (OUTSIDE_IT_INSN);
16932
16933 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16934 first_error (_("invalid instruction shape"));
16935 }
16936
16937 static void
16938 do_vmaxnm (void)
16939 {
16940 set_it_insn_type (OUTSIDE_IT_INSN);
16941
16942 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16943 return;
16944
16945 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16946 return;
16947
16948 neon_dyadic_misc (NT_untyped, N_F32, 0);
16949 }
16950
/* Common encoder for the VRINT family; MODE selects the rounding mode.
   Tries the scalar VFP interpretation of the operands first, then falls
   back to the Neon vector encoding.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* First see whether the operands type-check as a VFP instruction.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  The a/n/p/m modes may not be conditionalised.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Rounding-mode bits of the VFP encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 distinguishes the double-precision form.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding-mode field at bits [9:7] of the Neon encoding; the 'r'
	 mode has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17038
/* Entry points for the VRINT family; each just selects the rounding
   mode passed to the common encoder do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17080
17081 /* Crypto v1 instructions. */
/* Common encoding for the two-operand (Qd, Qm) crypto instructions.
   ELTTYPE is the required element type; OP is the value for bits [7:6],
   or -1 for instructions that have no such field.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Clear any error left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Fixed Thumb or ARM encoding prefix.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17106
/* Common encoding for the three-operand (Qd, Qn, Qm) crypto
   instructions.  U and OP select the variant; both are folded into the
   neon_three_same call (OP as 8 << op in the size argument).  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Clear any error left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17121
/* Entry points for the AES/SHA crypto instructions; each selects the
   element type / operation passed to the shared encoders
   do_crypto_2op_1 and do_crypto_3op_1.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17205
/* Common encoding for the CRC32 family.  POLY selects the polynomial
   variant (0 for CRC32, 1 for CRC32C — see the do_crc32* entry points
   below) and SZ the operand size (0 = byte, 1 = halfword, 2 = word).
   The field positions differ between the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  /* Must not be conditionalised: reject use inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* r15 is unpredictable in either encoding; r13 only in Thumb.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17225
/* Entry points for the CRC32 instructions: the first argument to
   do_crc32_1 picks the polynomial (0 = CRC32, 1 = CRC32C), the second
   the operand size (0 = byte, 1 = halfword, 2 = word).  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17261
17262 \f
17263 /* Overall per-instruction processing. */
17264
17265 /* We need to be able to fix up arbitrary expressions in some statements.
17266 This is so that we can handle symbols that are an arbitrary distance from
17267 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17268 which returns part of an address in a form which will be valid for
17269 a data instruction. We do this by pushing the expression into a symbol
17270 in the expr_section, and creating a fix for that. */
17271
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression in place to refer to that symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      /* These forms can be handed to the generic fixup machinery
	 directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is pushed into a symbol in the expression
	 section and fixed against that, non-PC-relatively.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17325
17326 /* Create a frg for an instruction requiring relaxation. */
17327 static void
17328 output_relax_insn (void)
17329 {
17330 char * to;
17331 symbolS *sym;
17332 int offset;
17333
17334 /* The size of the instruction is unknown, so tie the debug info to the
17335 start of the instruction. */
17336 dwarf2_emit_insn (0);
17337
17338 switch (inst.reloc.exp.X_op)
17339 {
17340 case O_symbol:
17341 sym = inst.reloc.exp.X_add_symbol;
17342 offset = inst.reloc.exp.X_add_number;
17343 break;
17344 case O_constant:
17345 sym = NULL;
17346 offset = inst.reloc.exp.X_add_number;
17347 break;
17348 default:
17349 sym = make_expr_symbol (&inst.reloc.exp);
17350 offset = 0;
17351 break;
17352 }
17353 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
17354 inst.relax, sym, offset, NULL/*offset, opcode*/);
17355 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
17356 }
17357
/* Write a 32-bit thumb instruction to buf.  The encoding is stored as
   two consecutive 16-bit units, most-significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17365
/* Emit the instruction currently held in INST to the output, together
   with any fixup and line debug info.  STR is the source text, used in
   diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  /* Report any diagnostic queued up during parsing/encoding.  */
  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions get a variant frag instead of fixed bytes.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* A double-word ARM encoding: emit the same word twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Queue a fixup if the operands contained a relocatable expression.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17412
17413 static char *
17414 output_it_inst (int cond, int mask, char * to)
17415 {
17416 unsigned long instruction = 0xbf00;
17417
17418 mask &= 0xf;
17419 instruction |= mask;
17420 instruction |= cond << 4;
17421
17422 if (to == NULL)
17423 {
17424 to = frag_more (2);
17425 #ifdef OBJ_ELF
17426 dwarf2_emit_insn (2);
17427 #endif
17428 }
17429
17430 md_number_to_chars (to, instruction, 2);
17431
17432 return to;
17433 }
17434
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a conditional affix may attach to the mnemonic, and drive
   the lookup algorithm in opcode_lookup.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17468
17469 /* Subroutine of md_assemble, responsible for looking up the primary
17470 opcode from the mnemonic the user wrote. STR points to the
17471 beginning of the mnemonic.
17472
17473 This is not simply a hash table lookup, because of conditional
17474 variants. Most instructions have conditional variants, which are
17475 expressed with a _conditional affix_ to the mnemonic. If we were
17476 to encode each conditional variant as a literal string in the opcode
17477 table, it would have approximately 20,000 entries.
17478
17479 Most mnemonics take this affix as a suffix, and in unified syntax,
17480 'most' is upgraded to 'all'. However, in the divided syntax, some
17481 instructions take the affix as an infix, notably the s-variants of
17482 the arithmetic instructions. Of those instructions, all but six
17483 have the infix appear after the third character of the mnemonic.
17484
17485 Accordingly, the algorithm for looking up primary opcodes given
17486 an identifier is:
17487
17488 1. Look up the identifier in the opcode table.
17489 If we find a match, go to step U.
17490
17491 2. Look up the last two characters of the identifier in the
17492 conditions table. If we find a match, look up the first N-2
17493 characters of the identifier in the opcode table. If we
17494 find a match, go to step CE.
17495
17496 3. Look up the fourth and fifth characters of the identifier in
17497 the conditions table. If we find a match, extract those
17498 characters from the identifier, and look up the remaining
17499 characters in the opcode table. If we find a match, go
17500 to step CM.
17501
17502 4. Fail.
17503
17504 U. Examine the tag field of the opcode structure, in case this is
17505 one of the six instructions with its conditional infix in an
17506 unusual place. If it is, the tag tells us where to find the
17507 infix; look it up in the conditions table and set inst.cond
17508 accordingly. Otherwise, this is an unconditional instruction.
17509 Again set inst.cond accordingly. Return the opcode structure.
17510
17511 CE. Examine the tag field to make sure this is an instruction that
17512 should receive a conditional suffix. If it is not, fail.
17513 Otherwise, set inst.cond from the suffix we already looked up,
17514 and return the opcode structure.
17515
17516 CM. Examine the tag field to make sure this is an instruction that
17517 should receive a conditional infix after the third character.
17518 If it is not, fail. Otherwise, undo the edits to the current
17519 line of input and proceed as for case CE. */
17520
17521 static const struct asm_opcode *
17522 opcode_lookup (char **str)
17523 {
17524 char *end, *base;
17525 char *affix;
17526 const struct asm_opcode *opcode;
17527 const struct asm_cond *cond;
17528 char save[2];
17529
17530 /* Scan up to the end of the mnemonic, which must end in white space,
17531 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17532 for (base = end = *str; *end != '\0'; end++)
17533 if (*end == ' ' || *end == '.')
17534 break;
17535
17536 if (end == base)
17537 return NULL;
17538
17539 /* Handle a possible width suffix and/or Neon type suffix. */
17540 if (end[0] == '.')
17541 {
17542 int offset = 2;
17543
17544 /* The .w and .n suffixes are only valid if the unified syntax is in
17545 use. */
17546 if (unified_syntax && end[1] == 'w')
17547 inst.size_req = 4;
17548 else if (unified_syntax && end[1] == 'n')
17549 inst.size_req = 2;
17550 else
17551 offset = 0;
17552
17553 inst.vectype.elems = 0;
17554
17555 *str = end + offset;
17556
17557 if (end[offset] == '.')
17558 {
17559 /* See if we have a Neon type suffix (possible in either unified or
17560 non-unified ARM syntax mode). */
17561 if (parse_neon_type (&inst.vectype, str) == FAIL)
17562 return NULL;
17563 }
17564 else if (end[offset] != '\0' && end[offset] != ' ')
17565 return NULL;
17566 }
17567 else
17568 *str = end;
17569
17570 /* Look for unaffixed or special-case affixed mnemonic. */
17571 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17572 end - base);
17573 if (opcode)
17574 {
17575 /* step U */
17576 if (opcode->tag < OT_odd_infix_0)
17577 {
17578 inst.cond = COND_ALWAYS;
17579 return opcode;
17580 }
17581
17582 if (warn_on_deprecated && unified_syntax)
17583 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17584 affix = base + (opcode->tag - OT_odd_infix_0);
17585 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17586 gas_assert (cond);
17587
17588 inst.cond = cond->value;
17589 return opcode;
17590 }
17591
17592 /* Cannot have a conditional suffix on a mnemonic of less than two
17593 characters. */
17594 if (end - base < 3)
17595 return NULL;
17596
17597 /* Look for suffixed mnemonic. */
17598 affix = end - 2;
17599 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17600 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17601 affix - base);
17602 if (opcode && cond)
17603 {
17604 /* step CE */
17605 switch (opcode->tag)
17606 {
17607 case OT_cinfix3_legacy:
17608 /* Ignore conditional suffixes matched on infix only mnemonics. */
17609 break;
17610
17611 case OT_cinfix3:
17612 case OT_cinfix3_deprecated:
17613 case OT_odd_infix_unc:
17614 if (!unified_syntax)
17615 return 0;
17616 /* else fall through */
17617
17618 case OT_csuffix:
17619 case OT_csuffixF:
17620 case OT_csuf_or_in3:
17621 inst.cond = cond->value;
17622 return opcode;
17623
17624 case OT_unconditional:
17625 case OT_unconditionalF:
17626 if (thumb_mode)
17627 inst.cond = cond->value;
17628 else
17629 {
17630 /* Delayed diagnostic. */
17631 inst.error = BAD_COND;
17632 inst.cond = COND_ALWAYS;
17633 }
17634 return opcode;
17635
17636 default:
17637 return NULL;
17638 }
17639 }
17640
17641 /* Cannot have a usual-position infix on a mnemonic of less than
17642 six characters (five would be a suffix). */
17643 if (end - base < 6)
17644 return NULL;
17645
17646 /* Look for infixed mnemonic in the usual position. */
17647 affix = base + 3;
17648 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17649 if (!cond)
17650 return NULL;
17651
17652 memcpy (save, affix, 2);
17653 memmove (affix, affix + 2, (end - affix) - 2);
17654 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17655 (end - base) - 2);
17656 memmove (affix + 2, affix, (end - affix) - 2);
17657 memcpy (affix, save, 2);
17658
17659 if (opcode
17660 && (opcode->tag == OT_cinfix3
17661 || opcode->tag == OT_cinfix3_deprecated
17662 || opcode->tag == OT_csuf_or_in3
17663 || opcode->tag == OT_cinfix3_legacy))
17664 {
17665 /* Step CM. */
17666 if (warn_on_deprecated && unified_syntax
17667 && (opcode->tag == OT_cinfix3
17668 || opcode->tag == OT_cinfix3_deprecated))
17669 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17670
17671 inst.cond = cond->value;
17672 return opcode;
17673 }
17674
17675 return NULL;
17676 }
17677
17678 /* This function generates an initial IT instruction, leaving its block
17679 virtually open for the new instructions. Eventually,
17680 the mask will be updated by now_it_add_mask () each time
17681 a new instruction needs to be included in the IT block.
17682 Finally, the block is closed with close_automatic_it_block ().
17683 The block closure can be requested either from md_assemble (),
17684 a tencode (), or due to a label hook. */
17685
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask; now_it_add_mask () rewrites it as insns are added.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction and remember where it went so the mask can
     be patched later via output_it_inst (..., now_it.insn).  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17698
17699 /* Close an automatic IT block.
17700 See comments in new_automatic_it_block (). */
17701
static void
close_automatic_it_block (void)
{
  /* 0x10 is the mask value that handle_it_state () and
     it_fsm_post_encode () test for to detect the end of a block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17708
17709 /* Update the mask of the current automatically-generated IT
17710 instruction. See comments in new_automatic_it_block (). */
17711
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					 | ((bitvalue) << (nbit)))
  /* Only the low bit of the condition is recorded per block slot.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Store this insn's condition bit in the slot selected by the current
     block length...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ...and set the next lower bit (the position tested against 0x10 to
     detect a full block).  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Patch the previously emitted IT instruction (now_it.insn) with the
     updated mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17732
17733 /* The IT blocks handling machinery is accessed through the these functions:
17734 it_fsm_pre_encode () from md_assemble ()
17735 set_it_insn_type () optional, from the tencode functions
17736 set_it_insn_type_last () ditto
17737 in_it_block () ditto
17738 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
17740
17741 Rationale:
17742 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17743 initializing the IT insn type with a generic initial value depending
17744 on the inst.condition.
17745 2) During the tencode function, two things may happen:
17746 a) The tencode function overrides the IT insn type by
17747 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17748 b) The tencode function queries the IT block state by
17749 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17750
17751 Both set_it_insn_type and in_it_block run the internal FSM state
17752 handling function (handle_it_state), because: a) setting the IT insn
17753 type may incur in an invalid state (exiting the function),
17754 and b) querying the state requires the FSM to be updated.
17755 Specifically we want to avoid creating an IT block for conditional
17756 branches, so it_fsm_pre_encode is actually a guess and we can't
17757 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17759 Because of this, if set_it_insn_type and in_it_block have to be used,
17760 set_it_insn_type has to be called first.
17761
17762 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17763 determines the insn IT type depending on the inst.cond code.
17764 When a tencode () routine encodes an instruction that can be
17765 either outside an IT block, or, in the case of being inside, has to be
17766 the last one, set_it_insn_type_last () will determine the proper
17767 IT instruction type based on the inst.cond code. Otherwise,
17768 set_it_insn_type can be called for overriding that logic or
17769 for covering other cases.
17770
17771 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17773 still queried. Instead, if the FSM determines that the state should
17774 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17775 after the tencode () function: that's what it_fsm_post_encode () does.
17776
17777 Since in_it_block () calls the state handling function to get an
17778 updated state, an error may occur (due to invalid insns combination).
17779 In that case, inst.error is set.
17780 Therefore, inst.error has to be checked after the execution of
17781 the tencode () routine.
17782
17783 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17784 any pending state change (if any) that didn't take place in
17785 handle_it_state () as explained above. */
17786
17787 static void
17788 it_fsm_pre_encode (void)
17789 {
17790 if (inst.cond != COND_ALWAYS)
17791 inst.it_insn_type = INSIDE_IT_INSN;
17792 else
17793 inst.it_insn_type = OUTSIDE_IT_INSN;
17794
17795 now_it.state_handled = 0;
17796 }
17797
17798 /* IT state FSM handling function. */
17799
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and implicit IT
		     generation is not enabled (or not possible).  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Close the block if it is full (> 4 insns) or the condition no
	     longer fits; optionally open a fresh one for this insn.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* A "last" insn terminates the block it just joined.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* Explicit IT: abandon the automatic block, go manual.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    /* Unconditional insn in a manual IT block.  */
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    /* Nested IT instructions are not permitted.  */
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17961
/* Pattern/mask pair describing a class of 16-bit Thumb encodings, with
   a translatable name for diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits identifying the class.  */
  unsigned long mask;		/* Which insn bits to compare.  */
  const char* description;	/* Class name used in the warning.  */
};
17968
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  /* Zero mask terminates the scan in it_fsm_post_encode ().  */
  { 0, 0, NULL }
};
17983
17984 static void
17985 it_fsm_post_encode (void)
17986 {
17987 int is_last;
17988
17989 if (!now_it.state_handled)
17990 handle_it_state ();
17991
17992 if (now_it.insn_cond
17993 && !now_it.warn_deprecated
17994 && warn_on_deprecated
17995 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17996 {
17997 if (inst.instruction >= 0x10000)
17998 {
17999 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18000 "deprecated in ARMv8"));
18001 now_it.warn_deprecated = TRUE;
18002 }
18003 else
18004 {
18005 const struct depr_insn_mask *p = depr_it_insns;
18006
18007 while (p->mask != 0)
18008 {
18009 if ((inst.instruction & p->mask) == p->pattern)
18010 {
18011 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18012 "of the following class are deprecated in ARMv8: "
18013 "%s"), p->description);
18014 now_it.warn_deprecated = TRUE;
18015 break;
18016 }
18017
18018 ++p;
18019 }
18020 }
18021
18022 if (now_it.block_length > 1)
18023 {
18024 as_tsktsk (_("IT blocks containing more than one conditional "
18025 "instruction are deprecated in ARMv8"));
18026 now_it.warn_deprecated = TRUE;
18027 }
18028 }
18029
18030 is_last = (now_it.mask == 0x10);
18031 if (is_last)
18032 {
18033 now_it.state = OUTSIDE_IT_BLOCK;
18034 now_it.mask = 0;
18035 }
18036 }
18037
18038 static void
18039 force_automatic_it_block_close (void)
18040 {
18041 if (now_it.state == AUTOMATIC_IT_BLOCK)
18042 {
18043 close_automatic_it_block ();
18044 now_it.state = OUTSIDE_IT_BLOCK;
18045 now_it.mask = 0;
18046 }
18047 }
18048
18049 static int
18050 in_it_block (void)
18051 {
18052 if (!now_it.state_handled)
18053 handle_it_state ();
18054
18055 return now_it.state != OUTSIDE_IT_BLOCK;
18056 }
18057
18058 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18059 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18060 here, hence the "known" in the function name. */
18061
18062 static bfd_boolean
18063 known_t32_only_insn (const struct asm_opcode *opcode)
18064 {
18065 /* Original Thumb-1 wide instruction. */
18066 if (opcode->tencode == do_t_blx
18067 || opcode->tencode == do_t_branch23
18068 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18069 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18070 return TRUE;
18071
18072 /* Wide-only instruction added to ARMv8-M. */
18073 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m)
18074 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18075 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18076 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18077 return TRUE;
18078
18079 return FALSE;
18080 }
18081
18082 /* Whether wide instruction variant can be used if available for a valid OPCODE
18083 in ARCH. */
18084
18085 static bfd_boolean
18086 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18087 {
18088 if (known_t32_only_insn (opcode))
18089 return TRUE;
18090
18091 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18092 of variant T3 of B.W is checked in do_t_branch. */
18093 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18094 && opcode->tencode == do_t_branch)
18095 return TRUE;
18096
18097 /* Wide instruction variants of all instructions with narrow *and* wide
18098 variants become available with ARMv6t2. Other opcodes are either
18099 narrow-only or wide-only and are thus available if OPCODE is valid. */
18100 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18101 return TRUE;
18102
18103 /* OPCODE with narrow only instruction variant or wide variant not
18104 available. */
18105 return FALSE;
18106 }
18107
/* Main entry point for assembling a single source line STR: look up the
   mnemonic, parse operands, and encode in ARM or Thumb mode.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction parse/encode state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  /* Encodings above 0xffff are 32-bit (wide) Thumb insns.  */
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18297
/* Warn if assembly ends while a manually opened IT block is still
   unterminated.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* IT state is tracked per-section for ELF; check every section.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18316
18317 /* Various frobbings of labels and their addresses. */
18318
void
arm_start_line_hook (void)
{
  /* A new source line begins: forget the label from the previous one.  */
  last_label_seen = NULL;
}
18324
/* Record SYM as the most recent label, tag it with the current mode
   (Thumb/interwork), close any automatic IT block, and optionally mark
   it as a Thumb function.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18383
18384 bfd_boolean
18385 arm_data_in_code (void)
18386 {
18387 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18388 {
18389 *input_line_pointer = '/';
18390 input_line_pointer += 5;
18391 *input_line_pointer = 0;
18392 return TRUE;
18393 }
18394
18395 return FALSE;
18396 }
18397
18398 char *
18399 arm_canonicalize_symbol_name (char * name)
18400 {
18401 int len;
18402
18403 if (thumb_mode && (len = strlen (name)) > 5
18404 && streq (name + len - 5, "/data"))
18405 *(name + len - 5) = 0;
18406
18407 return name;
18408 }
18409 \f
18410 /* Table of all register names defined by default. The user can
18411 define additional names with .req. Note that all register names
18412 should appear in both upper and lowercase variants. Some registers
18413 also have mixed-case names. */
18414
/* REGDEF expands to a reg_entry initializer; the helper macros below
   stamp out numbered runs of them.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only REGDEF/REGNUM/REGSET are #undef'd here; REGNUM2,
   REGSETH, REGSET2 and SPLRBANK remain defined past this point — confirm
   whether later code relies on them before adding the missing #undefs.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18558
18559 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18560 within psr_required_here. */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every permutation of each letter subset is
     listed; letter order does not affect the resulting mask.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18637
/* Table of V7M psr names.  The value is the SYSm encoding used by the
   M-profile MRS/MSR instructions; each name is entered in both lower
   and upper case.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 }, {"APSR", 0 },
  {"iapsr", 1 }, {"IAPSR", 1 },
  {"eapsr", 2 }, {"EAPSR", 2 },
  {"psr", 3 }, {"PSR", 3 },
  {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
  {"ipsr", 5 }, {"IPSR", 5 },
  {"epsr", 6 }, {"EPSR", 6 },
  {"iepsr", 7 }, {"IEPSR", 7 },
  {"msp", 8 }, {"MSP", 8 },
  {"psp", 9 }, {"PSP", 9 },
  {"primask", 16}, {"PRIMASK", 16},
  {"basepri", 17}, {"BASEPRI", 17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  /* NOTE: the lower-case key below duplicates the entry above; only the
     upper-case "BASEPRI_MASK" spelling is the historical typo being kept.  */
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask", 19}, {"FAULTMASK", 19},
  {"control", 20}, {"CONTROL", 20}
};
18658
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl": an arithmetic and a logical shift left are identical, so
   both map to SHIFT_LSL.  Each name appears in lower and upper case.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
18669
18670 /* Table of all explicit relocation names. */
18671 #ifdef OBJ_ELF
18672 static struct reloc_entry reloc_names[] =
18673 {
18674 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
18675 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
18676 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
18677 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18678 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18679 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
18680 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
18681 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
18682 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
18683 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18684 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
18685 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18686 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18687 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18688 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18689 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18690 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18691 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18692 };
18693 #endif
18694
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   Several conditions are synonyms for the same encoding: cs/hs (carry set /
   unsigned higher-or-same) and cc/ul/lo (carry clear / unsigned lower).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18714
18715 #define UL_BARRIER(L,U,CODE,FEAT) \
18716 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18717 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18718
18719 static struct asm_barrier_opt barrier_opt_names[] =
18720 {
18721 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
18722 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
18723 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
18724 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
18725 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
18726 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
18727 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
18728 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
18729 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
18730 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
18731 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
18732 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
18733 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
18734 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
18735 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
18736 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
18737 };
18738
18739 #undef UL_BARRIER
18740
18741 /* Table of ARM-format instructions. */
18742
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   (OP_stop is enumerator zero, so the zero fill acts as the list
   terminator.)  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18765
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expands to one asm_opcode
   initializer: { name, operands, suffix-tag, ARM opcode, Thumb opcode,
   ARM variant, Thumb variant, ARM encoder, Thumb encoder }.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
18817
/* ARM-only variants of all the above.  The Thumb opcode field is 0 and
   the Thumb encoder function is NULL.  Note that CE/CL take a string
   literal mnemonic while C3 takes a bare identifier (stringized here).  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Build one entry with an infix m2 between mnemonic fragments m1 and m3;
   the suffix-tag records where the (possibly empty) infix sits.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18852
/* Expand a mnemonic with every condition-code infix (plus the bare form),
   generating one table entry per condition via xCM_ above.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditionalizable mnemonics: UE keeps cond=0xE, UF bears
   0xF in the condition field (encoded in the opcode value passed in).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18879
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,			\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder encoder: "do_##0" expands to "do_0", i.e. a null entry.  */
#define do_0 0
18917
18918 static const struct asm_opcode insns[] =
18919 {
18920 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18921 #define THUMB_VARIANT & arm_ext_v4t
18922 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18923 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18924 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18925 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18926 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18927 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18928 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18929 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18930 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18931 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18932 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18933 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18934 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18935 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18936 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18937 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18938
18939 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18940 for setting PSR flag bits. They are obsolete in V6 and do not
18941 have Thumb equivalents. */
18942 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18943 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18944 CL("tstp", 110f000, 2, (RR, SH), cmp),
18945 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18946 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18947 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18948 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18949 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18950 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18951
18952 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18953 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
18954 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18955 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18956
18957 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18958 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18959 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18960 OP_RRnpc),
18961 OP_ADDRGLDR),ldst, t_ldst),
18962 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18963
18964 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18965 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18966 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18967 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18968 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18969 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18970
18971 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18972 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18973 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18974 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18975
18976 /* Pseudo ops. */
18977 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18978 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18979 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18980 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18981
18982 /* Thumb-compatibility pseudo ops. */
18983 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18984 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18985 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18986 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18987 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18988 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18989 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18990 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18991 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18992 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18993 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18994 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18995
18996 /* These may simplify to neg. */
18997 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18998 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18999
19000 #undef THUMB_VARIANT
19001 #define THUMB_VARIANT & arm_ext_v6
19002
19003 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19004
19005 /* V1 instructions with no Thumb analogue prior to V6T2. */
19006 #undef THUMB_VARIANT
19007 #define THUMB_VARIANT & arm_ext_v6t2
19008
19009 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19010 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19011 CL("teqp", 130f000, 2, (RR, SH), cmp),
19012
19013 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19014 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19015 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19016 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19017
19018 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19019 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19020
19021 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19022 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19023
19024 /* V1 instructions with no Thumb analogue at all. */
19025 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19026 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19027
19028 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19029 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19030 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19031 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19032 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19033 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19034 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19035 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19036
19037 #undef ARM_VARIANT
19038 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19039 #undef THUMB_VARIANT
19040 #define THUMB_VARIANT & arm_ext_v4t
19041
19042 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19043 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19044
19045 #undef THUMB_VARIANT
19046 #define THUMB_VARIANT & arm_ext_v6t2
19047
19048 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19049 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19050
19051 /* Generic coprocessor instructions. */
19052 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19053 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19054 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19055 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19056 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19057 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19058 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19059
19060 #undef ARM_VARIANT
19061 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19062
19063 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19064 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19065
19066 #undef ARM_VARIANT
19067 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19068 #undef THUMB_VARIANT
19069 #define THUMB_VARIANT & arm_ext_msr
19070
19071 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19072 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19073
19074 #undef ARM_VARIANT
19075 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19076 #undef THUMB_VARIANT
19077 #define THUMB_VARIANT & arm_ext_v6t2
19078
19079 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19080 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19081 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19082 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19083 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19084 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19085 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19086 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19087
19088 #undef ARM_VARIANT
19089 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19090 #undef THUMB_VARIANT
19091 #define THUMB_VARIANT & arm_ext_v4t
19092
19093 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19094 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19095 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19096 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19097 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19098 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19099
19100 #undef ARM_VARIANT
19101 #define ARM_VARIANT & arm_ext_v4t_5
19102
19103 /* ARM Architecture 4T. */
19104 /* Note: bx (and blx) are required on V5, even if the processor does
19105 not support Thumb. */
19106 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19107
19108 #undef ARM_VARIANT
19109 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19110 #undef THUMB_VARIANT
19111 #define THUMB_VARIANT & arm_ext_v5t
19112
19113 /* Note: blx has 2 variants; the .value coded here is for
19114 BLX(2). Only this variant has conditional execution. */
19115 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19116 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19117
19118 #undef THUMB_VARIANT
19119 #define THUMB_VARIANT & arm_ext_v6t2
19120
19121 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19122 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19123 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19124 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19125 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19126 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19127 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19128 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19129
19130 #undef ARM_VARIANT
19131 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19132 #undef THUMB_VARIANT
19133 #define THUMB_VARIANT & arm_ext_v5exp
19134
19135 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19136 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19137 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19138 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19139
19140 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19141 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19142
19143 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19144 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19145 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19146 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19147
19148 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19149 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19150 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19151 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19152
19153 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19154 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19155
19156 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19157 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19158 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19159 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19160
19161 #undef ARM_VARIANT
19162 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19163 #undef THUMB_VARIANT
19164 #define THUMB_VARIANT & arm_ext_v6t2
19165
19166 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19167 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19168 ldrd, t_ldstd),
19169 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19170 ADDRGLDRS), ldrd, t_ldstd),
19171
19172 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19173 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19174
19175 #undef ARM_VARIANT
19176 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19177
19178 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19179
19180 #undef ARM_VARIANT
19181 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19182 #undef THUMB_VARIANT
19183 #define THUMB_VARIANT & arm_ext_v6
19184
19185 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19186 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19187 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19188 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19189 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19190 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19191 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19192 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19193 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19194 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19195
19196 #undef THUMB_VARIANT
19197 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19198
19199 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19200 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19201 strex, t_strex),
19202 #undef THUMB_VARIANT
19203 #define THUMB_VARIANT & arm_ext_v6t2
19204
19205 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19206 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19207
19208 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19209 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19210
19211 /* ARM V6 not included in V7M. */
19212 #undef THUMB_VARIANT
19213 #define THUMB_VARIANT & arm_ext_v6_notm
19214 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19215 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19216 UF(rfeib, 9900a00, 1, (RRw), rfe),
19217 UF(rfeda, 8100a00, 1, (RRw), rfe),
19218 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19219 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19220 UF(rfefa, 8100a00, 1, (RRw), rfe),
19221 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19222 UF(rfeed, 9900a00, 1, (RRw), rfe),
19223 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19224 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19225 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19226 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19227 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19228 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19229 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19230 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19231 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19232 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19233
19234 /* ARM V6 not included in V7M (eg. integer SIMD). */
19235 #undef THUMB_VARIANT
19236 #define THUMB_VARIANT & arm_ext_v6_dsp
19237 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19238 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19239 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19240 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19241 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19242 /* Old name for QASX. */
19243 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19244 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19245 /* Old name for QSAX. */
19246 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19247 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19248 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19249 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19250 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19251 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19252 /* Old name for SASX. */
19253 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19254 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19255 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19256 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19257 /* Old name for SHASX. */
19258 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19259 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19260 /* Old name for SHSAX. */
19261 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19262 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19263 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19264 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19265 /* Old name for SSAX. */
19266 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19267 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19268 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19269 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19270 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19271 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19272 /* Old name for UASX. */
19273 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19274 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19275 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19276 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19277 /* Old name for UHASX. */
19278 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19279 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19280 /* Old name for UHSAX. */
19281 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19282 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19283 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19284 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19285 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19286 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19287 /* Old name for UQASX. */
19288 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19289 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19290 /* Old name for UQSAX. */
19291 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19292 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19293 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19294 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19295 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19296 /* Old name for USAX. */
19297 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19298 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19299 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19300 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19301 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19302 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19303 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19304 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19305 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19306 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19307 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19308 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19309 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19310 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19311 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19312 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19313 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19314 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19315 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19316 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19317 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19318 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19319 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19320 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19321 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19322 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19323 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19324 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19325 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19326 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19327 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19328 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19329 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19330 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19331
19332 #undef ARM_VARIANT
19333 #define ARM_VARIANT & arm_ext_v6k
19334 #undef THUMB_VARIANT
19335 #define THUMB_VARIANT & arm_ext_v6k
19336
19337 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19338 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19339 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19340 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19341
19342 #undef THUMB_VARIANT
19343 #define THUMB_VARIANT & arm_ext_v6_notm
19344 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19345 ldrexd, t_ldrexd),
19346 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19347 RRnpcb), strexd, t_strexd),
19348
19349 #undef THUMB_VARIANT
19350 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19351 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19352 rd_rn, rd_rn),
19353 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19354 rd_rn, rd_rn),
19355 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19356 strex, t_strexbh),
19357 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19358 strex, t_strexbh),
19359 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19360
19361 #undef ARM_VARIANT
19362 #define ARM_VARIANT & arm_ext_sec
19363 #undef THUMB_VARIANT
19364 #define THUMB_VARIANT & arm_ext_sec
19365
19366 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19367
19368 #undef ARM_VARIANT
19369 #define ARM_VARIANT & arm_ext_virt
19370 #undef THUMB_VARIANT
19371 #define THUMB_VARIANT & arm_ext_virt
19372
19373 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19374 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19375
19376 #undef ARM_VARIANT
19377 #define ARM_VARIANT & arm_ext_pan
19378 #undef THUMB_VARIANT
19379 #define THUMB_VARIANT & arm_ext_pan
19380
19381 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19382
19383 #undef ARM_VARIANT
19384 #define ARM_VARIANT & arm_ext_v6t2
19385 #undef THUMB_VARIANT
19386 #define THUMB_VARIANT & arm_ext_v6t2
19387
19388 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19389 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19390 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19391 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19392
19393 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19394 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19395
19396 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19397 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19398 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19399 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19400
19401 #undef THUMB_VARIANT
19402 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19403 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19404 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19405
19406 /* Thumb-only instructions. */
19407 #undef ARM_VARIANT
19408 #define ARM_VARIANT NULL
19409 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19410 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19411
19412 /* ARM does not really have an IT instruction, so always allow it.
19413 The opcode is copied from Thumb in order to allow warnings in
19414 -mimplicit-it=[never | arm] modes. */
19415 #undef ARM_VARIANT
19416 #define ARM_VARIANT & arm_ext_v1
19417 #undef THUMB_VARIANT
19418 #define THUMB_VARIANT & arm_ext_v6t2
19419
19420 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19421 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19422 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19423 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19424 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19425 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19426 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19427 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19428 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19429 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19430 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19431 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19432 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19433 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19434 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19435 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19436 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19437 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19438
19439 /* Thumb2 only instructions. */
19440 #undef ARM_VARIANT
19441 #define ARM_VARIANT NULL
19442
19443 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19444 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19445 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19446 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19447 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19448 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19449
19450 /* Hardware division instructions. */
19451 #undef ARM_VARIANT
19452 #define ARM_VARIANT & arm_ext_adiv
19453 #undef THUMB_VARIANT
19454 #define THUMB_VARIANT & arm_ext_div
19455
19456 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19457 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19458
19459 /* ARM V6M/V7 instructions. */
19460 #undef ARM_VARIANT
19461 #define ARM_VARIANT & arm_ext_barrier
19462 #undef THUMB_VARIANT
19463 #define THUMB_VARIANT & arm_ext_barrier
19464
19465 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19466 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19467 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19468
19469 /* ARM V7 instructions. */
19470 #undef ARM_VARIANT
19471 #define ARM_VARIANT & arm_ext_v7
19472 #undef THUMB_VARIANT
19473 #define THUMB_VARIANT & arm_ext_v7
19474
19475 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19476 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19477
19478 #undef ARM_VARIANT
19479 #define ARM_VARIANT & arm_ext_mp
19480 #undef THUMB_VARIANT
19481 #define THUMB_VARIANT & arm_ext_mp
19482
19483 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19484
19485 /* AArchv8 instructions. */
19486 #undef ARM_VARIANT
19487 #define ARM_VARIANT & arm_ext_v8
19488
19489 /* Instructions shared between armv8-a and armv8-m. */
19490 #undef THUMB_VARIANT
19491 #define THUMB_VARIANT & arm_ext_atomics
19492
19493 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19494 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19495 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19496 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19497 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19498 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19499 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19500 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19501 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19502 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19503 stlex, t_stlex),
19504 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19505 stlex, t_stlex),
19506 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19507 stlex, t_stlex),
19508 #undef THUMB_VARIANT
19509 #define THUMB_VARIANT & arm_ext_v8
19510
19511 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19512 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19513 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19514 ldrexd, t_ldrexd),
19515 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19516 strexd, t_strexd),
19517 /* ARMv8 T32 only. */
19518 #undef ARM_VARIANT
19519 #define ARM_VARIANT NULL
19520 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19521 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19522 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19523
19524 /* FP for ARMv8. */
19525 #undef ARM_VARIANT
19526 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19527 #undef THUMB_VARIANT
19528 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19529
19530 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19531 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19532 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19533 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19534 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19535 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19536 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19537 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19538 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19539 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19540 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19541 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19542 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19543 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19544 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19545 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19546 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19547
19548 /* Crypto v1 extensions. */
19549 #undef ARM_VARIANT
19550 #define ARM_VARIANT & fpu_crypto_ext_armv8
19551 #undef THUMB_VARIANT
19552 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19553
19554 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19555 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19556 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19557 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19558 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19559 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19560 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19561 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19562 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19563 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19564 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19565 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19566 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19567 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19568
19569 #undef ARM_VARIANT
19570 #define ARM_VARIANT & crc_ext_armv8
19571 #undef THUMB_VARIANT
19572 #define THUMB_VARIANT & crc_ext_armv8
19573 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19574 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19575 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19576 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19577 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19578 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19579
19580 /* ARMv8.2 RAS extension. */
19581 #undef ARM_VARIANT
19582 #define ARM_VARIANT & arm_ext_v8_2
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v8_2
19585 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19586
19587 #undef ARM_VARIANT
19588 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19589 #undef THUMB_VARIANT
19590 #define THUMB_VARIANT NULL
19591
19592 cCE("wfs", e200110, 1, (RR), rd),
19593 cCE("rfs", e300110, 1, (RR), rd),
19594 cCE("wfc", e400110, 1, (RR), rd),
19595 cCE("rfc", e500110, 1, (RR), rd),
19596
19597 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19598 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19599 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19600 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19601
19602 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19603 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19604 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19605 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19606
19607 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19608 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19609 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19610 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19611 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19612 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19613 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19614 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19615 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19616 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19617 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19618 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19619
19620 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19621 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19622 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19623 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19624 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19625 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19626 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19627 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19628 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19629 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19630 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19631 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19632
19633 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19634 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19635 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19636 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19637 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19638 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19639 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19640 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19641 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19642 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19643 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19644 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19645
19646 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19647 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19648 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19649 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19650 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19651 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19652 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19653 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19654 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19655 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19656 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19657 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19658
19659 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19660 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19661 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19662 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19663 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19664 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19665 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19666 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19667 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19668 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19669 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19670 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19671
19672 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19673 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19674 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19675 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19676 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19677 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19678 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19679 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19680 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19681 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19682 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19683 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19684
19685 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19686 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19687 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19688 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19689 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19690 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19691 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19692 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19693 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19694 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19695 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19696 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19697
19698 cCL("exps",	e708100, 2, (RF, RF_IF),	     rd_rm),
19699 cCL("expsp",	e708120, 2, (RF, RF_IF),	     rd_rm),
19700 cCL("expsm",	e708140, 2, (RF, RF_IF),	     rd_rm),
19701 cCL("expsz",	e708160, 2, (RF, RF_IF),	     rd_rm),
19702 cCL("expd",	e708180, 2, (RF, RF_IF),	     rd_rm),
19703 cCL("expdp",	e7081a0, 2, (RF, RF_IF),	     rd_rm),
19704 cCL("expdm",	e7081c0, 2, (RF, RF_IF),	     rd_rm),
19705 cCL("expdz",	e7081e0, 2, (RF, RF_IF),	     rd_rm),
19706 cCL("expe",	e788100, 2, (RF, RF_IF),	     rd_rm),
19707 cCL("expep",	e788120, 2, (RF, RF_IF),	     rd_rm),
19708 cCL("expem",	e788140, 2, (RF, RF_IF),	     rd_rm),
      /* NOTE(review): "expdz" here duplicates the mnemonic registered
	 for encoding e7081e0 above.  Every neighbouring FPA group
	 (mvf*, mnf*, abs*, rnd*, sqt*, log*, ...) follows the
	 s/d/e x p/m/z suffix pattern, under which the e788160 slot
	 would be "expez".  Presumably a long-standing typo -- confirm
	 against the FPA documentation and existing users before
	 renaming, since the mnemonic string is the assembler's
	 lookup key.  */
19709 cCL("expdz",	e788160, 2, (RF, RF_IF),	     rd_rm),
19710
19711 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19712 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19713 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19714 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19715 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19716 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19717 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19718 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19719 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19720 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19721 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19722 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19723
19724 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19725 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19726 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19727 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19728 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19729 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19730 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19731 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19732 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19733 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19734 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19735 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19736
19737 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19738 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19739 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19740 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19741 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19742 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19743 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19744 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19745 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19746 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19747 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19748 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19749
19750 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19751 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19752 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19753 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19754 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19755 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19756 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19757 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19758 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19759 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19760 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19761 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19762
19763 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19764 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19765 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19766 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19767 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19768 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19769 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19770 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19771 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19772 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19773 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19774 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19775
19776 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19777 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19778 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19779 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19780 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19781 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19782 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19783 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19784 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19785 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19786 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19787 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19788
19789 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19790 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19791 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19792 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19793 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19794 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19795 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19796 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19797 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19798 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19799 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19800 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19801
19802 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19803 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19804 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19805 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19806 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19807 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19808 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19809 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19810 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19811 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19812 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19813 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19814
19815 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19816 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19817 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19818 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19819 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19820 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19821 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19822 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19823 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19824 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19825 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19826 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19827
19828 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19829 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19830 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19831 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19832 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19833 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19834 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19835 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19836 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19837 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19838 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19839 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19840
19841 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19842 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19843 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19844 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19845 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19846 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19847 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19848 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19849 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19850 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19851 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19852 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19853
19854 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19855 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19856 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19857 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19858 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19859 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19860 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19861 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19862 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19863 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19864 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19865 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19866
19867 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19868 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19869 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19870 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19871 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19872 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19873 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19874 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19875 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19876 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19877 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19878 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19879
19880 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19881 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19882 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19883 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19884 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19885 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19886 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19887 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19888 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19889 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19890 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19891 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19892
19893 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19894 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19895 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19896 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19897 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19898 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19899 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19900 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19901 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19902 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19903 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19904 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19905
19906 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19907 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19908 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19909 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19910 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19911 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19912 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19913 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19914 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19915 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19916 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19917 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19918
19919 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19920 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19921 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19922 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19923 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19924 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19925 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19926 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19927 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19928 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19929 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19930 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19931
19932 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19933 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19934 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19935 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19936 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19937 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19938 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19939 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19940 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19941 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19942 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19943 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19944
19945 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19946 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19947 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19948 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19949 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19950 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19951 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19952 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19953 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19954 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19955 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19956 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19957
19958 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19959 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19960 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19961 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19962 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19963 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19964 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19965 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19966 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19967 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19968 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19969 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19970
19971 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19972 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19973 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19974 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19975 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19976 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19977 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19978 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19979 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19980 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19981 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19982 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19983
19984 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19985 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19986 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19987 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19988
19989 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19990 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19991 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19992 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19993 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19994 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19995 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19996 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19997 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19998 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19999 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20000 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20001
20002 /* The implementation of the FIX instruction is broken on some
20003 assemblers, in that it accepts a precision specifier as well as a
20004 rounding specifier, despite the fact that this is meaningless.
20005 To be more compatible, we accept it as well, though of course it
20006 does not set any bits. */
20007 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20008 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20009 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20010 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20011 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20012 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20013 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20014 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20015 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20016 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20017 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20018 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20019 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20020
20021 /* Instructions that were new with the real FPA, call them V2. */
20022 #undef ARM_VARIANT
20023 #define ARM_VARIANT & fpu_fpa_ext_v2
20024
20025 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20026 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20027 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20028 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20029 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20030 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20031
20032 #undef ARM_VARIANT
20033 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20034
20035 /* Moves and type conversions. */
20036 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20037 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20038 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20039 cCE("fmstat", ef1fa10, 0, (), noargs),
20040 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20041 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20042 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20043 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20044 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20045 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20046 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20047 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20048 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20049 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20050
20051 /* Memory operations. */
20052 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20053 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20054 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20055 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20056 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20057 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20058 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20059 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20060 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20061 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20062 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20063 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20064 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20065 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20066 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20067 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20068 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20069 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20070
20071 /* Monadic operations. */
20072 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20073 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20074 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20075
20076 /* Dyadic operations. */
20077 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20078 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20079 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20080 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20081 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20082 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20083 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20084 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20085 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20086
20087 /* Comparisons. */
20088 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20089 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20090 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20091 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20092
20093 /* Double precision load/store are still present on single precision
20094 implementations. */
20095 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20096 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20097 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20098 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20099 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20100 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20101 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20102 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20103 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20104 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20105
20106 #undef ARM_VARIANT
20107 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20108
20109 /* Moves and type conversions. */
20110 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20111 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20112 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20113 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20114 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20115 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20116 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20117 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20118 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20119 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20120 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20121 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20122 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20123
20124 /* Monadic operations. */
20125 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20126 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20127 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20128
20129 /* Dyadic operations. */
20130 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20131 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20132 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20133 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20134 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20135 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20136 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20137 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20138 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20139
20140 /* Comparisons. */
20141 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20142 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20143 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20144 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20145
20146 #undef ARM_VARIANT
20147 #define ARM_VARIANT & fpu_vfp_ext_v2
20148
20149 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20150 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20151 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20152 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20153
20154 /* Instructions which may belong to either the Neon or VFP instruction sets.
20155 Individual encoder functions perform additional architecture checks. */
20156 #undef ARM_VARIANT
20157 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20158 #undef THUMB_VARIANT
20159 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20160
20161 /* These mnemonics are unique to VFP. */
20162 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20163 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20164 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20165 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20166 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20167 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20168 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20169 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20170 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20171 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20172
20173 /* Mnemonics shared by Neon and VFP. */
20174 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20175 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20176 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20177
20178 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20179 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20180
20181 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20182 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20183
20184 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20185 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20186 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20187 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20188 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20189 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20190 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20191 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20192
20193 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20194 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20195 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20196 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20197
20198
20199 /* NOTE: All VMOV encoding is special-cased! */
20200 NCE(vmov, 0, 1, (VMOV), neon_mov),
20201 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20202
20203 #undef ARM_VARIANT
20204 #define ARM_VARIANT & arm_ext_fp16
20205 #undef THUMB_VARIANT
20206 #define THUMB_VARIANT & arm_ext_fp16
20207 /* New instructions added from v8.2, allowing the extraction and insertion of
20208 the upper 16 bits of a 32-bit vector register. */
20209 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20210 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20211
20212 #undef THUMB_VARIANT
20213 #define THUMB_VARIANT & fpu_neon_ext_v1
20214 #undef ARM_VARIANT
20215 #define ARM_VARIANT & fpu_neon_ext_v1
20216
20217 /* Data processing with three registers of the same length. */
20218 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20219 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20220 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20221 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20222 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20223 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20224 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20225 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20226 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20227 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20228 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20229 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20230 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20231 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20232 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20233 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20234 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20235 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20236 /* If not immediate, fall back to neon_dyadic_i64_su.
20237 shl_imm should accept I8 I16 I32 I64,
20238 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20239 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20240 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20241 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20242 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20243 /* Logic ops, types optional & ignored. */
20244 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20245 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20246 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20247 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20248 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20249 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20250 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20251 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20252 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20253 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20254 /* Bitfield ops, untyped. */
20255 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20256 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20257 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20258 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20259 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20260 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20261 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20262 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20263 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20264 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20265 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20266 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20267 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20268 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20269 back to neon_dyadic_if_su. */
20270 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20271 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20272 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20273 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20274 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20275 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20276 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20277 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20278 /* Comparison. Type I8 I16 I32 F32. */
20279 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20280 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20281 /* As above, D registers only. */
20282 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20283 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20284 /* Int and float variants, signedness unimportant. */
20285 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20286 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20287 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20288 /* Add/sub take types I8 I16 I32 I64 F32. */
20289 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20290 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20291 /* vtst takes sizes 8, 16, 32. */
20292 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20293 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20294 /* VMUL takes I8 I16 I32 F32 P8. */
20295 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20296 /* VQD{R}MULH takes S16 S32. */
20297 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20298 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20299 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20300 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20301 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20302 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20303 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20304 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20305 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20306 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20307 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20308 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20309 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20310 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20311 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20312 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20313 /* ARM v8.1 extension. */
20314 nUF(vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20315 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20316 nUF(vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20317 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20318
20319 /* Two address, int/float. Types S8 S16 S32 F32. */
20320 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20321 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20322
20323 /* Data processing with two registers and a shift amount. */
20324 /* Right shifts, and variants with rounding.
20325 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20326 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20327 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20328 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20329 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20330 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20331 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20332 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20333 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20334 /* Shift and insert. Sizes accepted 8 16 32 64. */
20335 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20336 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20337 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20338 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20339 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20340 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20341 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20342 /* Right shift immediate, saturating & narrowing, with rounding variants.
20343 Types accepted S16 S32 S64 U16 U32 U64. */
20344 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20345 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20346 /* As above, unsigned. Types accepted S16 S32 S64. */
20347 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20348 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20349 /* Right shift narrowing. Types accepted I16 I32 I64. */
20350 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20351 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20352 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20353 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20354 /* CVT with optional immediate for fixed-point variant. */
20355 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20356
20357 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20358 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20359
20360 /* Data processing, three registers of different lengths. */
20361 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20362 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20363 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20364 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20365 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20366 /* If not scalar, fall back to neon_dyadic_long.
20367 Vector types as above, scalar types S16 S32 U16 U32. */
20368 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20369 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20370 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20371 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20372 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20373 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20374 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20375 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20376 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20377 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20378 /* Saturating doubling multiplies. Types S16 S32. */
20379 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20380 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20381 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20382 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20383 S16 S32 U16 U32. */
20384 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20385
20386 /* Extract. Size 8. */
20387 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20388 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20389
20390 /* Two registers, miscellaneous. */
20391 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20392 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20393 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20394 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20395 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20396 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20397 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20398 /* Vector replicate. Sizes 8 16 32. */
20399 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20400 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20401 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20402 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20403 /* VMOVN. Types I16 I32 I64. */
20404 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20405 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20406 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20407 /* VQMOVUN. Types S16 S32 S64. */
20408 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20409 /* VZIP / VUZP. Sizes 8 16 32. */
20410 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20411 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20412 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20413 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20414 /* VQABS / VQNEG. Types S8 S16 S32. */
20415 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20416 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20417 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20418 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20419 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20420 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20421 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20422 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20423 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20424 /* Reciprocal estimates. Types U32 F32. */
20425 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20426 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20427 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20428 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20429 /* VCLS. Types S8 S16 S32. */
20430 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20431 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20432 /* VCLZ. Types I8 I16 I32. */
20433 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20434 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20435 /* VCNT. Size 8. */
20436 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20437 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20438 /* Two address, untyped. */
20439 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20440 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20441 /* VTRN. Sizes 8 16 32. */
20442 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20443 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20444
20445 /* Table lookup. Size 8. */
20446 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20447 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20448
20449 #undef THUMB_VARIANT
20450 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20451 #undef ARM_VARIANT
20452 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20453
20454 /* Neon element/structure load/store. */
20455 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20456 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20457 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20458 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20459 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20460 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20461 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20462 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20463
20464 #undef THUMB_VARIANT
20465 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20466 #undef ARM_VARIANT
20467 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20468 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20469 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20470 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20471 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20472 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20473 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20474 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20475 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20476 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20477
20478 #undef THUMB_VARIANT
20479 #define THUMB_VARIANT & fpu_vfp_ext_v3
20480 #undef ARM_VARIANT
20481 #define ARM_VARIANT & fpu_vfp_ext_v3
20482
20483 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20484 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20485 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20486 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20487 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20488 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20489 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20490 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20491 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20492
20493 #undef ARM_VARIANT
20494 #define ARM_VARIANT & fpu_vfp_ext_fma
20495 #undef THUMB_VARIANT
20496 #define THUMB_VARIANT & fpu_vfp_ext_fma
20497 /* Mnemonics shared by Neon and VFP. These are included in the
20498 VFP FMA variant; NEON and VFP FMA always includes the NEON
20499 FMA instructions. */
20500 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20501 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20502 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20503 the v form should always be used. */
20504 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20505 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20506 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20507 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20508 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20509 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20510
20511 #undef THUMB_VARIANT
20512 #undef ARM_VARIANT
20513 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20514
20515 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20516 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20517 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20518 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20519 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20520 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20521 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20522 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20523
20524 #undef ARM_VARIANT
20525 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20526
20527 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20528 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20529 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20530 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20531 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20532 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20533 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20534 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20535 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20536 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20537 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20538 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20539 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20540 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20541 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20542 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20543 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20544 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20545 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20546 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20547 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20548 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20549 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20550 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20551 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20552 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20553 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20554 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20555 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20556 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20557 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20558 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20559 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20560 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20561 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20562 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20563 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20564 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20565 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20566 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20567 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20568 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20569 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20570 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20571 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20572 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20573 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20574 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20575 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20576 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20577 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20578 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20579 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20580 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20581 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20582 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20583 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20584 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20585 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20586 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20587 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20588 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20589 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20590 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20591 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20592 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20593 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20594 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20595 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20596 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20597 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20598 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20599 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20600 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20601 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20602 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20603 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20604 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20605 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20606 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20607 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20608 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20609 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20610 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20611 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20612 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20613 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20614 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20615 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20616 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20617 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20618 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20619 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20620 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20621 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20622 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20623 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20624 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20625 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20626 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20627 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20628 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20629 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20630 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20631 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20632 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20633 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20634 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20635 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20636 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20637 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20638 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20639 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20640 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20641 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20642 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20643 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20644 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20645 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20646 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20647 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20648 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20649 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20650 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20651 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20652 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20653 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20654 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20655 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20656 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20657 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20658 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20659 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20660 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20661 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20662 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20663 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20664 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20665 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20666 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20667 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20668 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20669 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20670 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20671 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20672 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20673 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20674 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20675 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20676 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20677 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20678 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20679 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20680 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20681 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20682 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20683 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20684 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20685 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20686 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20687 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20688 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20689
20690 #undef ARM_VARIANT
20691 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20692
20693 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20694 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20695 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20696 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20697 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20698 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20699 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20700 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20701 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20702 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20703 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20704 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20705 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20706 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20707 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20708 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20709 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20710 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20711 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20712 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20713 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20714 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20715 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20716 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20717 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20718 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20719 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20720 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20721 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20722 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20723 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20724 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20725 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20726 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20727 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20728 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20729 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20730 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20731 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20732 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20733 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20734 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20735 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20736 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20737 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20738 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20739 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20740 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20741 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20742 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20743 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20744 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20745 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20746 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20747 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20748 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20749 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20750
20751 #undef ARM_VARIANT
20752 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20753
20754 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20755 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20756 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20757 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20758 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20759 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20760 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20761 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20762 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20763 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20764 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20765 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20766 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20767 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20768 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20769 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20770 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20771 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20772 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20773 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20774 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20775 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20776 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20777 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20778 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20779 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20780 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20781 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20782 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20783 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20784 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20785 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20786 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20787 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20788 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20789 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20790 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20791 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20792 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20793 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20794 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20795 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20796 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20797 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20798 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20799 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20800 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20801 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20802 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20803 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20804 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20805 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20806 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20807 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20808 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20809 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20810 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20811 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20812 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20813 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20814 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20815 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20816 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20817 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20818 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20819 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20820 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20821 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20822 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20823 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20824 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20825 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20826 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20827 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20828 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20829 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20830
20831 #undef ARM_VARIANT
20832 #define ARM_VARIANT NULL
20833 #undef THUMB_VARIANT
20834 #define THUMB_VARIANT & arm_ext_v8m
20835 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20836 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20837 };
20838 #undef ARM_VARIANT
20839 #undef THUMB_VARIANT
20840 #undef TCE
20841 #undef TUE
20842 #undef TUF
20843 #undef TCC
20844 #undef cCE
20845 #undef cCL
20846 #undef C3E
20847 #undef CE
20848 #undef CM
20849 #undef UE
20850 #undef UF
20851 #undef UT
20852 #undef NUF
20853 #undef nUF
20854 #undef NCE
20855 #undef nCE
20856 #undef OPS0
20857 #undef OPS1
20858 #undef OPS2
20859 #undef OPS3
20860 #undef OPS4
20861 #undef OPS5
20862 #undef OPS6
20863 #undef do_0
20864 \f
20865 /* MD interface: bits in the object file. */
20866
20867 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20868 for use in the a.out file, and stores them in the array pointed to by buf.
20869 This knows about the endian-ness of the target machine and does
20870 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20871 2 (short) and 4 (long) Floating numbers are put out as a series of
20872 LITTLENUMS (shorts, here at least). */
20873
20874 void
20875 md_number_to_chars (char * buf, valueT val, int n)
20876 {
20877 if (target_big_endian)
20878 number_to_chars_bigendian (buf, val, n);
20879 else
20880 number_to_chars_littleendian (buf, val, n);
20881 }
20882
20883 static valueT
20884 md_chars_to_number (char * buf, int n)
20885 {
20886 valueT result = 0;
20887 unsigned char * where = (unsigned char *) buf;
20888
20889 if (target_big_endian)
20890 {
20891 while (n--)
20892 {
20893 result <<= 8;
20894 result |= (*where++ & 255);
20895 }
20896 }
20897 else
20898 {
20899 while (n--)
20900 {
20901 result <<= 8;
20902 result |= (where[n] & 255);
20903 }
20904 }
20905
20906 return result;
20907 }
20908
20909 /* MD interface: Sections. */
20910
20911 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20912 that an rs_machine_dependent frag may reach. */
20913
20914 unsigned int
20915 arm_frag_max_var (fragS *fragp)
20916 {
20917 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20918 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20919
20920 Note that we generate relaxable instructions even for cases that don't
20921 really need it, like an immediate that's a trivial constant. So we're
20922 overestimating the instruction size for some of those cases. Rather
20923 than putting more intelligence here, it would probably be better to
20924 avoid generating a relaxation frag in the first place when it can be
20925 determined up front that a short instruction will suffice. */
20926
20927 gas_assert (fragp->fr_type == rs_machine_dependent);
20928 return INSN_SIZE;
20929 }
20930
20931 /* Estimate the size of a frag before relaxing. Assume everything fits in
20932 2 bytes. */
20933
20934 int
20935 md_estimate_size_before_relax (fragS * fragp,
20936 segT segtype ATTRIBUTE_UNUSED)
20937 {
20938 fragp->fr_var = 2;
20939 return 2;
20940 }
20941
/* Convert a machine dependent frag.  Called once relaxation has settled:
   the frag's final size is in fr_var (2 for a narrow Thumb encoding,
   4 for a wide Thumb-2 encoding).  For the wide case the 16-bit opcode
   already in the frag is rewritten as the equivalent 32-bit encoding;
   in both cases a fixup carrying the operand expression is attached so
   the immediate/offset is filled in by the fixup machinery.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction starts at the end of the fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The narrow (16-bit) opcode previously emitted; its register fields
     are extracted below when widening to 32 bits.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Opcode groups 4 and 9 are the SP/PC-relative forms, which
	     keep Rt in bits 8-10 of the narrow encoding; the others
	     carry Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative pseudo-load is itself PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry Rd in the destination field; cmp/cmn have
	     no destination, so the register goes into Rn instead.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the condition code field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant the Rd and Rn fields of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S flag in the wide encoding: flag-setting
	     forms take the ADD_IMM reloc, plain forms IMMEDIATE.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21115
21116 /* Return the size of a relaxable immediate operand instruction.
21117 SHIFT and SIZE specify the form of the allowable immediate. */
21118 static int
21119 relax_immediate (fragS *fragp, int size, int shift)
21120 {
21121 offsetT offset;
21122 offsetT mask;
21123 offsetT low;
21124
21125 /* ??? Should be able to do better than this. */
21126 if (fragp->fr_symbol)
21127 return 4;
21128
21129 low = (1 << shift) - 1;
21130 mask = (1 << (shift + size)) - (1 << shift);
21131 offset = fragp->fr_offset;
21132 /* Force misaligned offsets to 32-bit variant. */
21133 if (offset & low)
21134 return 4;
21135 if (offset & ~mask)
21136 return 4;
21137 return 2;
21138 }
21139
/* Get the address of a symbol during relaxation.  Returns the symbol's
   value plus the frag offset, compensated by STRETCH when the symbol's
   frag has not yet been visited on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch toward zero to the alignment
		 boundary the frag enforces; once it hits zero no
		 later alignment frag can change anything.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means sym_frag was not found ahead of us, i.e. the
	 symbol's frag lies behind and has already moved — no
	 compensation needed.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21189
21190 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21191 load. */
21192 static int
21193 relax_adr (fragS *fragp, asection *sec, long stretch)
21194 {
21195 addressT addr;
21196 offsetT val;
21197
21198 /* Assume worst case for symbols not known to be in the same section. */
21199 if (fragp->fr_symbol == NULL
21200 || !S_IS_DEFINED (fragp->fr_symbol)
21201 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21202 || S_IS_WEAK (fragp->fr_symbol))
21203 return 4;
21204
21205 val = relaxed_symbol_addr (fragp, stretch);
21206 addr = fragp->fr_address + fragp->fr_fix;
21207 addr = (addr + 4) & ~3;
21208 /* Force misaligned targets to 32-bit variant. */
21209 if (val & 3)
21210 return 4;
21211 val -= addr;
21212 if (val < 0 || val > 1020)
21213 return 4;
21214 return 2;
21215 }
21216
21217 /* Return the size of a relaxable add/sub immediate instruction. */
21218 static int
21219 relax_addsub (fragS *fragp, asection *sec)
21220 {
21221 char *buf;
21222 int op;
21223
21224 buf = fragp->fr_literal + fragp->fr_fix;
21225 op = bfd_get_16(sec->owner, buf);
21226 if ((op & 0xf) == ((op >> 4) & 0xf))
21227 return relax_immediate (fragp, 8, 0);
21228 else
21229 return relax_immediate (fragp, 3, 0);
21230 }
21231
21232 /* Return TRUE iff the definition of symbol S could be pre-empted
21233 (overridden) at link or load time. */
21234 static bfd_boolean
21235 symbol_preemptible (symbolS *s)
21236 {
21237 /* Weak symbols can always be pre-empted. */
21238 if (S_IS_WEAK (s))
21239 return TRUE;
21240
21241 /* Non-global symbols cannot be pre-empted. */
21242 if (! S_IS_EXTERNAL (s))
21243 return FALSE;
21244
21245 #ifdef OBJ_ELF
21246 /* In ELF, a global symbol can be marked protected, or private. In that
21247 case it can't be pre-empted (other definitions in the same link unit
21248 would violate the ODR). */
21249 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21250 return FALSE;
21251 #endif
21252
21253 /* Other global symbols might be pre-empted. */
21254 return TRUE;
21255 }
21256
21257 /* Return the size of a relaxable branch instruction. BITS is the
21258 size of the offset field in the narrow instruction. */
21259
21260 static int
21261 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21262 {
21263 addressT addr;
21264 offsetT val;
21265 offsetT limit;
21266
21267 /* Assume worst case for symbols not known to be in the same section. */
21268 if (!S_IS_DEFINED (fragp->fr_symbol)
21269 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21270 || S_IS_WEAK (fragp->fr_symbol))
21271 return 4;
21272
21273 #ifdef OBJ_ELF
21274 /* A branch to a function in ARM state will require interworking. */
21275 if (S_IS_DEFINED (fragp->fr_symbol)
21276 && ARM_IS_FUNC (fragp->fr_symbol))
21277 return 4;
21278 #endif
21279
21280 if (symbol_preemptible (fragp->fr_symbol))
21281 return 4;
21282
21283 val = relaxed_symbol_addr (fragp, stretch);
21284 addr = fragp->fr_address + fragp->fr_fix + 4;
21285 val -= addr;
21286
21287 /* Offset is a signed value *2 */
21288 limit = 1 << bits;
21289 if (val >= limit || val < -limit)
21290 return 4;
21291 return 2;
21292 }
21293
21294
21295 /* Relax a machine dependent frag. This returns the amount by which
21296 the current size of the frag should change. */
21297
21298 int
21299 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21300 {
21301 int oldsize;
21302 int newsize;
21303
21304 oldsize = fragp->fr_var;
21305 switch (fragp->fr_subtype)
21306 {
21307 case T_MNEM_ldr_pc2:
21308 newsize = relax_adr (fragp, sec, stretch);
21309 break;
21310 case T_MNEM_ldr_pc:
21311 case T_MNEM_ldr_sp:
21312 case T_MNEM_str_sp:
21313 newsize = relax_immediate (fragp, 8, 2);
21314 break;
21315 case T_MNEM_ldr:
21316 case T_MNEM_str:
21317 newsize = relax_immediate (fragp, 5, 2);
21318 break;
21319 case T_MNEM_ldrh:
21320 case T_MNEM_strh:
21321 newsize = relax_immediate (fragp, 5, 1);
21322 break;
21323 case T_MNEM_ldrb:
21324 case T_MNEM_strb:
21325 newsize = relax_immediate (fragp, 5, 0);
21326 break;
21327 case T_MNEM_adr:
21328 newsize = relax_adr (fragp, sec, stretch);
21329 break;
21330 case T_MNEM_mov:
21331 case T_MNEM_movs:
21332 case T_MNEM_cmp:
21333 case T_MNEM_cmn:
21334 newsize = relax_immediate (fragp, 8, 0);
21335 break;
21336 case T_MNEM_b:
21337 newsize = relax_branch (fragp, sec, 11, stretch);
21338 break;
21339 case T_MNEM_bcond:
21340 newsize = relax_branch (fragp, sec, 8, stretch);
21341 break;
21342 case T_MNEM_add_sp:
21343 case T_MNEM_add_pc:
21344 newsize = relax_immediate (fragp, 8, 2);
21345 break;
21346 case T_MNEM_inc_sp:
21347 case T_MNEM_dec_sp:
21348 newsize = relax_immediate (fragp, 7, 2);
21349 break;
21350 case T_MNEM_addi:
21351 case T_MNEM_addis:
21352 case T_MNEM_subi:
21353 case T_MNEM_subis:
21354 newsize = relax_addsub (fragp, sec);
21355 break;
21356 default:
21357 abort ();
21358 }
21359
21360 fragp->fr_var = newsize;
21361 /* Freeze wide instructions that are at or before the same location as
21362 in the previous pass. This avoids infinite loops.
21363 Don't freeze them unconditionally because targets may be artificially
21364 misaligned by the expansion of preceding frags. */
21365 if (stretch <= 0 && newsize > 2)
21366 {
21367 md_convert_frag (sec->owner, sec, fragp);
21368 frag_wane (fragp);
21369 }
21370
21371 return newsize - oldsize;
21372 }
21373
21374 /* Round up a section size to the appropriate boundary. */
21375
21376 valueT
21377 md_section_align (segT segment ATTRIBUTE_UNUSED,
21378 valueT size)
21379 {
21380 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21381 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21382 {
21383 /* For a.out, force the section size to be aligned. If we don't do
21384 this, BFD will align it for us, but it will not write out the
21385 final bytes of the section. This may be a bug in BFD, but it is
21386 easier to fix it here since that is how the other a.out targets
21387 work. */
21388 int align;
21389
21390 align = bfd_get_section_alignment (stdoutput, segment);
21391 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21392 }
21393 #endif
21394
21395 return size;
21396 }
21397
21398 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21399 of an rs_align_code fragment. */
21400
void
arm_handle_align (fragS * fragP)
{
  /* NOP templates, indexed by [architecture generation][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  /* Only alignment frags in code sections need NOP filling.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* The frag literal only holds MAX_MEM_FOR_RS_ALIGN_CODE bytes; larger
     requests keep only the low bits (the rest is handled by the frag
     machinery through fr_var repetition).  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the ARM/Thumb mode for this frag.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  On v6t2 and later we may pad with 16-bit and
	 32-bit Thumb-2 NOPs; earlier cores only have the "mov r8, r8"
	 idiom.  Fall back to arm_arch_none when no CPU was named.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: architected NOP on v6k+, "mov r0, r0" otherwise.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Residue smaller than one NOP is filled with zero bytes and, for ELF,
     marked with a $d mapping symbol since it is data, not code.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      /* If the remainder is not a multiple of 4, emit one 16-bit NOP
	 first so the rest can be covered by wide NOPs.  */
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21517
21518 /* Called from md_do_align. Used to create an alignment
21519 frag in a code section. */
21520
21521 void
21522 arm_frag_align_code (int n, int max)
21523 {
21524 char * p;
21525
21526 /* We assume that there will never be a requirement
21527 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21528 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21529 {
21530 char err_msg[128];
21531
21532 sprintf (err_msg,
21533 _("alignments greater than %d bytes not supported in .text sections."),
21534 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21535 as_fatal ("%s", err_msg);
21536 }
21537
21538 p = frag_var (rs_align_code,
21539 MAX_MEM_FOR_RS_ALIGN_CODE,
21540 1,
21541 (relax_substateT) max,
21542 (symbolS *) NULL,
21543 (offsetT) n,
21544 (char *) NULL);
21545 *p = 0;
21546 }
21547
21548 /* Perform target specific initialisation of a frag.
21549 Note - despite the name this initialisation is not done when the frag
21550 is created, but only when its type is assigned. A frag can be created
21551 and used a long time before its type is set, so beware of assuming that
21552 this initialisationis performed first. */
21553
21554 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  /* MODE_RECORDED doubles as the "mode has been captured" marker that
     arm_handle_align asserts on later.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21561
21562 #else /* OBJ_ELF is defined. */
21563 void
21564 arm_init_frag (fragS * fragP, int max_chars)
21565 {
21566 int frag_thumb_mode;
21567
21568 /* If the current ARM vs THUMB mode has not already
21569 been recorded into this frag then do so now. */
21570 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21571 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21572
21573 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21574
21575 /* Record a mapping symbol for alignment frags. We will delete this
21576 later if the alignment ends up empty. */
21577 switch (fragP->fr_type)
21578 {
21579 case rs_align:
21580 case rs_align_test:
21581 case rs_fill:
21582 mapping_state_2 (MAP_DATA, max_chars);
21583 break;
21584 case rs_align_code:
21585 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21586 break;
21587 default:
21588 break;
21589 }
21590 }
21591
21592 /* When we change sections we need to issue a new mapping symbol. */
21593
21594 void
21595 arm_elf_change_section (void)
21596 {
21597 /* Link an unlinked unwind index table section to the .text section. */
21598 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21599 && elf_linked_to_section (now_seg) == NULL)
21600 elf_linked_to_section (now_seg) = text_section;
21601 }
21602
21603 int
21604 arm_elf_section_type (const char * str, size_t len)
21605 {
21606 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21607 return SHT_ARM_EXIDX;
21608
21609 return -1;
21610 }
21611 \f
21612 /* Code to deal with unwinding tables. */
21613
21614 static void add_unwind_adjustsp (offsetT);
21615
21616 /* Generate any deferred unwind frame offset. */
21617
21618 static void
21619 flush_pending_unwind (void)
21620 {
21621 offsetT offset;
21622
21623 offset = unwind.pending_offset;
21624 unwind.pending_offset = 0;
21625 if (offset != 0)
21626 add_unwind_adjustsp (offset);
21627 }
21628
21629 /* Add an opcode to this list for this function. Two-byte opcodes should
21630 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21631 order. */
21632
21633 static void
21634 add_unwind_opcode (valueT op, int length)
21635 {
21636 /* Add any deferred stack adjustment. */
21637 if (unwind.pending_offset)
21638 flush_pending_unwind ();
21639
21640 unwind.sp_restored = 0;
21641
21642 if (unwind.opcode_count + length > unwind.opcode_alloc)
21643 {
21644 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21645 if (unwind.opcodes)
21646 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21647 unwind.opcode_alloc);
21648 else
21649 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21650 }
21651 while (length > 0)
21652 {
21653 length--;
21654 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21655 op >>= 8;
21656 unwind.opcode_count++;
21657 }
21658 }
21659
21660 /* Add unwind opcodes to adjust the stack pointer. */
21661
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      /* The encoded operand is biased by 0x204 and scaled by 4, per the
	 opcode's definition (words, with the short forms covering the
	 first 0x200 bytes).  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but the last byte.  */
	  n++;
	}
      /* Add the insn.  */
      /* Emit uleb128 bytes last-first, then the 0xb2 opcode itself, so
	 that the reversed list reads 0xb2 followed by the uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      /* 0x3f is the maximal single "vsp += ..." opcode; the second
	 opcode covers the remainder (biased by 0x104, scaled by 4).  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      /* 0x00-0x3f: vsp += (imm << 2) + 4.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal 0x7f opcodes until the
	 remainder fits a single 0x40-0x7f "vsp -= ..." opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21721
21722 /* Finish the list of unwind opcodes for this function. */
21723 static void
21724 finish_unwind_opcodes (void)
21725 {
21726 valueT op;
21727
21728 if (unwind.fp_used)
21729 {
21730 /* Adjust sp as necessary. */
21731 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21732 flush_pending_unwind ();
21733
21734 /* After restoring sp from the frame pointer. */
21735 op = 0x90 | unwind.fp_reg;
21736 add_unwind_opcode (op, 1);
21737 }
21738 else
21739 flush_pending_unwind ();
21740 }
21741
21742
21743 /* Start an exception table entry. If idx is nonzero this is an index table
21744 entry. */
21745
21746 static void
21747 start_unwind_section (const segT text_seg, int idx)
21748 {
21749 const char * text_name;
21750 const char * prefix;
21751 const char * prefix_once;
21752 const char * group_name;
21753 size_t prefix_len;
21754 size_t text_len;
21755 char * sec_name;
21756 size_t sec_name_len;
21757 int type;
21758 int flags;
21759 int linkonce;
21760
21761 if (idx)
21762 {
21763 prefix = ELF_STRING_ARM_unwind;
21764 prefix_once = ELF_STRING_ARM_unwind_once;
21765 type = SHT_ARM_EXIDX;
21766 }
21767 else
21768 {
21769 prefix = ELF_STRING_ARM_unwind_info;
21770 prefix_once = ELF_STRING_ARM_unwind_info_once;
21771 type = SHT_PROGBITS;
21772 }
21773
21774 text_name = segment_name (text_seg);
21775 if (streq (text_name, ".text"))
21776 text_name = "";
21777
21778 if (strncmp (text_name, ".gnu.linkonce.t.",
21779 strlen (".gnu.linkonce.t.")) == 0)
21780 {
21781 prefix = prefix_once;
21782 text_name += strlen (".gnu.linkonce.t.");
21783 }
21784
21785 prefix_len = strlen (prefix);
21786 text_len = strlen (text_name);
21787 sec_name_len = prefix_len + text_len;
21788 sec_name = (char *) xmalloc (sec_name_len + 1);
21789 memcpy (sec_name, prefix, prefix_len);
21790 memcpy (sec_name + prefix_len, text_name, text_len);
21791 sec_name[prefix_len + text_len] = '\0';
21792
21793 flags = SHF_ALLOC;
21794 linkonce = 0;
21795 group_name = 0;
21796
21797 /* Handle COMDAT group. */
21798 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21799 {
21800 group_name = elf_group_name (text_seg);
21801 if (group_name == NULL)
21802 {
21803 as_bad (_("Group section `%s' has no group signature"),
21804 segment_name (text_seg));
21805 ignore_rest_of_line ();
21806 return;
21807 }
21808 flags |= SHF_GROUP;
21809 linkonce = 1;
21810 }
21811
21812 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21813
21814 /* Set the section link for index tables. */
21815 if (idx)
21816 elf_linked_to_section (now_seg) = text_seg;
21817 }
21818
21819
21820 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21821 personality routine data. Returns zero, or the index table value for
21822 an inline entry. */
21823
static valueT
create_unwind_entry (int have_data)
{
  /* Number of extra 32-bit words of unwind data (after packing).  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 encodes ".cantunwind".  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 packs at most three opcode bytes inline; fall back
	     to routine 1 for longer sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      /* 0x80 marks a compact inline entry; up to three opcode
		 bytes follow in the same word.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to whole words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21988
21989
21990 /* Initialize the DWARF-2 unwind information for this procedure. */
21991
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21997 #endif /* OBJ_ELF */
21998
21999 /* Convert REGNAME to a DWARF-2 register number. */
22000
22001 int
22002 tc_arm_regname_to_dw2regnum (char *regname)
22003 {
22004 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22005 if (reg != FAIL)
22006 return reg;
22007
22008 /* PR 16694: Allow VFP registers as well. */
22009 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22010 if (reg != FAIL)
22011 return 64 + reg;
22012
22013 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22014 if (reg != FAIL)
22015 return reg + 256;
22016
22017 return -1;
22018 }
22019
22020 #ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Emit SYMBOL as a SIZE-byte section-relative (secrel) expression,
     as required for DWARF offsets in PE output.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
22031 #endif
22032
22033 /* MD interface: Symbol and relocation handling. */
22034
22035 /* Return the address within the segment that a PC-relative fixup is
22036 relative to. For ARM, PC-relative fixups applied to instructions
22037 are generally relative to the location of the fixup plus 8 bytes.
22038 Thumb branches are offset by 4, and Thumb loads relative to PC
22039 require special handling. */
22040
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself; the pipeline bias is added per
     relocation type below.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a resolvable in-segment branch to an ARM-state function on
	 a v5t+ core (where the linker may turn BL into BLX), restore
	 the real base even if it was cleared above.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22162
22163 static bfd_boolean flag_warn_syms = TRUE;
22164
22165 bfd_boolean
22166 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22167 {
22168 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22169 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22170 does mean that the resulting code might be very confusing to the reader.
22171 Also this warning can be triggered if the user omits an operand before
22172 an immediate address, eg:
22173
22174 LDR =foo
22175
22176 GAS treats this as an assignment of the value of the symbol foo to a
22177 symbol LDR, and so (without this code) it will not issue any kind of
22178 warning or error message.
22179
22180 Note - ARM instructions are case-insensitive but the strings in the hash
22181 table are all stored in lower case, so we must first ensure that name is
22182 lower case too. */
22183 if (flag_warn_syms && arm_ops_hsh)
22184 {
22185 char * nbuf = strdup (name);
22186 char * p;
22187
22188 for (p = nbuf; *p; p++)
22189 *p = TOLOWER (*p);
22190 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22191 {
22192 static struct hash_control * already_warned = NULL;
22193
22194 if (already_warned == NULL)
22195 already_warned = hash_new ();
22196 /* Only warn about the symbol once. To keep the code
22197 simple we let hash_insert do the lookup for us. */
22198 if (hash_insert (already_warned, name, NULL) == NULL)
22199 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22200 }
22201 else
22202 free (nbuf);
22203 }
22204
22205 return FALSE;
22206 }
22207
22208 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22209 Otherwise we have no need to default values of symbols. */
22210
22211 symbolS *
22212 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22213 {
22214 #ifdef OBJ_ELF
22215 if (name[0] == '_' && name[1] == 'G'
22216 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22217 {
22218 if (!GOT_symbol)
22219 {
22220 if (symbol_find (name))
22221 as_bad (_("GOT already in the symbol table"));
22222
22223 GOT_symbol = symbol_new (name, undefined_section,
22224 (valueT) 0, & zero_address_frag);
22225 }
22226
22227 return GOT_symbol;
22228 }
22229 #endif
22230
22231 return NULL;
22232 }
22233
22234 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22235 computed as two separate immediate values, added together. We
22236 already know that this value cannot be computed by just one ARM
22237 instruction. */
22238
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try every even rotation; each ARM encoded immediate is an 8-bit
     value combined with a rotation held in bits 8-11 (hence the i << 7
     terms below, which place i/2 in that field).  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* Significant bits extend past 16 - this rotation can't
	       split VAL into two bytes; try the next one.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low byte plus its rotation is the first immediate; the
	   second was stored through HIGHPART above.  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
22272
22273 static int
22274 validate_offset_imm (unsigned int val, int hwse)
22275 {
22276 if ((hwse && val > 255) || val > 4095)
22277 return FAIL;
22278 return val;
22279 }
22280
22281 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22282 negative immediate constant by altering the instruction. A bit of
22283 a hack really.
22284 MOV <-> MVN
22285 AND <-> BIC
22286 ADC <-> SBC
22287 by inverting the second operand, and
22288 ADD <-> SUB
22289 CMP <-> CMN
22290 by negating the second operand. */
22291
22292 static int
22293 negate_data_op (unsigned long * instruction,
22294 unsigned long value)
22295 {
22296 int op, new_inst;
22297 unsigned long negated, inverted;
22298
22299 negated = encode_arm_immediate (-value);
22300 inverted = encode_arm_immediate (~value);
22301
22302 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22303 switch (op)
22304 {
22305 /* First negates. */
22306 case OPCODE_SUB: /* ADD <-> SUB */
22307 new_inst = OPCODE_ADD;
22308 value = negated;
22309 break;
22310
22311 case OPCODE_ADD:
22312 new_inst = OPCODE_SUB;
22313 value = negated;
22314 break;
22315
22316 case OPCODE_CMP: /* CMP <-> CMN */
22317 new_inst = OPCODE_CMN;
22318 value = negated;
22319 break;
22320
22321 case OPCODE_CMN:
22322 new_inst = OPCODE_CMP;
22323 value = negated;
22324 break;
22325
22326 /* Now Inverted ops. */
22327 case OPCODE_MOV: /* MOV <-> MVN */
22328 new_inst = OPCODE_MVN;
22329 value = inverted;
22330 break;
22331
22332 case OPCODE_MVN:
22333 new_inst = OPCODE_MOV;
22334 value = inverted;
22335 break;
22336
22337 case OPCODE_AND: /* AND <-> BIC */
22338 new_inst = OPCODE_BIC;
22339 value = inverted;
22340 break;
22341
22342 case OPCODE_BIC:
22343 new_inst = OPCODE_AND;
22344 value = inverted;
22345 break;
22346
22347 case OPCODE_ADC: /* ADC <-> SBC */
22348 new_inst = OPCODE_SBC;
22349 value = inverted;
22350 break;
22351
22352 case OPCODE_SBC:
22353 new_inst = OPCODE_ADC;
22354 value = inverted;
22355 break;
22356
22357 /* We cannot do anything. */
22358 default:
22359 return FAIL;
22360 }
22361
22362 if (value == (unsigned) FAIL)
22363 return FAIL;
22364
22365 *instruction &= OPCODE_MASK;
22366 *instruction |= new_inst << DATA_OP_SHIFT;
22367 return value;
22368 }
22369
22370 /* Like negate_data_op, but for Thumb-2. */
22371
22372 static unsigned int
22373 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22374 {
22375 int op, new_inst;
22376 int rd;
22377 unsigned int negated, inverted;
22378
22379 negated = encode_thumb32_immediate (-value);
22380 inverted = encode_thumb32_immediate (~value);
22381
22382 rd = (*instruction >> 8) & 0xf;
22383 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22384 switch (op)
22385 {
22386 /* ADD <-> SUB. Includes CMP <-> CMN. */
22387 case T2_OPCODE_SUB:
22388 new_inst = T2_OPCODE_ADD;
22389 value = negated;
22390 break;
22391
22392 case T2_OPCODE_ADD:
22393 new_inst = T2_OPCODE_SUB;
22394 value = negated;
22395 break;
22396
22397 /* ORR <-> ORN. Includes MOV <-> MVN. */
22398 case T2_OPCODE_ORR:
22399 new_inst = T2_OPCODE_ORN;
22400 value = inverted;
22401 break;
22402
22403 case T2_OPCODE_ORN:
22404 new_inst = T2_OPCODE_ORR;
22405 value = inverted;
22406 break;
22407
22408 /* AND <-> BIC. TST has no inverted equivalent. */
22409 case T2_OPCODE_AND:
22410 new_inst = T2_OPCODE_BIC;
22411 if (rd == 15)
22412 value = FAIL;
22413 else
22414 value = inverted;
22415 break;
22416
22417 case T2_OPCODE_BIC:
22418 new_inst = T2_OPCODE_AND;
22419 value = inverted;
22420 break;
22421
22422 /* ADC <-> SBC */
22423 case T2_OPCODE_ADC:
22424 new_inst = T2_OPCODE_SBC;
22425 value = inverted;
22426 break;
22427
22428 case T2_OPCODE_SBC:
22429 new_inst = T2_OPCODE_ADC;
22430 value = inverted;
22431 break;
22432
22433 /* We cannot do anything. */
22434 default:
22435 return FAIL;
22436 }
22437
22438 if (value == (unsigned int)FAIL)
22439 return FAIL;
22440
22441 *instruction &= T2_OPCODE_MASK;
22442 *instruction |= new_inst << T2_DATA_OP_SHIFT;
22443 return value;
22444 }
22445
22446 /* Read a 32-bit thumb instruction from buf. */
22447 static unsigned long
22448 get_thumb32_insn (char * buf)
22449 {
22450 unsigned long insn;
22451 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22452 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22453
22454 return insn;
22455 }
22456
22457
22458 /* We usually want to set the low bit on the address of thumb function
22459 symbols. In particular .word foo - . should have the low bit set.
22460 Generic code tries to fold the difference of two symbols to
22461 a constant. Prevent this and force a relocation when the first symbols
22462 is a thumb function. */
22463
22464 bfd_boolean
22465 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22466 {
22467 if (op == O_subtract
22468 && l->X_op == O_symbol
22469 && r->X_op == O_symbol
22470 && THUMB_IS_FUNC (l->X_add_symbol))
22471 {
22472 l->X_op = O_subtract;
22473 l->X_op_symbol = r->X_add_symbol;
22474 l->X_add_number -= r->X_add_number;
22475 return TRUE;
22476 }
22477
22478 /* Process as normal. */
22479 return FALSE;
22480 }
22481
22482 /* Encode Thumb2 unconditional branches and calls. The encoding
22483 for the 2 are identical for the immediate values. */
22484
22485 static void
22486 encode_thumb2_b_bl_offset (char * buf, offsetT value)
22487 {
22488 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22489 offsetT newval;
22490 offsetT newval2;
22491 addressT S, I1, I2, lo, hi;
22492
22493 S = (value >> 24) & 0x01;
22494 I1 = (value >> 23) & 0x01;
22495 I2 = (value >> 22) & 0x01;
22496 hi = (value >> 12) & 0x3ff;
22497 lo = (value >> 1) & 0x7ff;
22498 newval = md_chars_to_number (buf, THUMB_SIZE);
22499 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22500 newval |= (S << 10) | hi;
22501 newval2 &= ~T2I1I2MASK;
22502 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22503 md_number_to_chars (buf, newval, THUMB_SIZE);
22504 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22505 }
22506
22507 void
22508 md_apply_fix (fixS * fixP,
22509 valueT * valP,
22510 segT seg)
22511 {
22512 offsetT value = * valP;
22513 offsetT newval;
22514 unsigned int newimm;
22515 unsigned long temp;
22516 int sign;
22517 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22518
22519 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22520
22521 /* Note whether this will delete the relocation. */
22522
22523 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22524 fixP->fx_done = 1;
22525
22526 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22527 consistency with the behaviour on 32-bit hosts. Remember value
22528 for emit_reloc. */
22529 value &= 0xffffffff;
22530 value ^= 0x80000000;
22531 value -= 0x80000000;
22532
22533 *valP = value;
22534 fixP->fx_addnumber = value;
22535
22536 /* Same treatment for fixP->fx_offset. */
22537 fixP->fx_offset &= 0xffffffff;
22538 fixP->fx_offset ^= 0x80000000;
22539 fixP->fx_offset -= 0x80000000;
22540
22541 switch (fixP->fx_r_type)
22542 {
22543 case BFD_RELOC_NONE:
22544 /* This will need to go in the object file. */
22545 fixP->fx_done = 0;
22546 break;
22547
22548 case BFD_RELOC_ARM_IMMEDIATE:
22549 /* We claim that this fixup has been processed here,
22550 even if in fact we generate an error because we do
22551 not have a reloc for it, so tc_gen_reloc will reject it. */
22552 fixP->fx_done = 1;
22553
22554 if (fixP->fx_addsy)
22555 {
22556 const char *msg = 0;
22557
22558 if (! S_IS_DEFINED (fixP->fx_addsy))
22559 msg = _("undefined symbol %s used as an immediate value");
22560 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22561 msg = _("symbol %s is in a different section");
22562 else if (S_IS_WEAK (fixP->fx_addsy))
22563 msg = _("symbol %s is weak and may be overridden later");
22564
22565 if (msg)
22566 {
22567 as_bad_where (fixP->fx_file, fixP->fx_line,
22568 msg, S_GET_NAME (fixP->fx_addsy));
22569 break;
22570 }
22571 }
22572
22573 temp = md_chars_to_number (buf, INSN_SIZE);
22574
22575 /* If the offset is negative, we should use encoding A2 for ADR. */
22576 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22577 newimm = negate_data_op (&temp, value);
22578 else
22579 {
22580 newimm = encode_arm_immediate (value);
22581
22582 /* If the instruction will fail, see if we can fix things up by
22583 changing the opcode. */
22584 if (newimm == (unsigned int) FAIL)
22585 newimm = negate_data_op (&temp, value);
22586 }
22587
22588 if (newimm == (unsigned int) FAIL)
22589 {
22590 as_bad_where (fixP->fx_file, fixP->fx_line,
22591 _("invalid constant (%lx) after fixup"),
22592 (unsigned long) value);
22593 break;
22594 }
22595
22596 newimm |= (temp & 0xfffff000);
22597 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22598 break;
22599
22600 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22601 {
22602 unsigned int highpart = 0;
22603 unsigned int newinsn = 0xe1a00000; /* nop. */
22604
22605 if (fixP->fx_addsy)
22606 {
22607 const char *msg = 0;
22608
22609 if (! S_IS_DEFINED (fixP->fx_addsy))
22610 msg = _("undefined symbol %s used as an immediate value");
22611 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22612 msg = _("symbol %s is in a different section");
22613 else if (S_IS_WEAK (fixP->fx_addsy))
22614 msg = _("symbol %s is weak and may be overridden later");
22615
22616 if (msg)
22617 {
22618 as_bad_where (fixP->fx_file, fixP->fx_line,
22619 msg, S_GET_NAME (fixP->fx_addsy));
22620 break;
22621 }
22622 }
22623
22624 newimm = encode_arm_immediate (value);
22625 temp = md_chars_to_number (buf, INSN_SIZE);
22626
22627 /* If the instruction will fail, see if we can fix things up by
22628 changing the opcode. */
22629 if (newimm == (unsigned int) FAIL
22630 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22631 {
22632 /* No ? OK - try using two ADD instructions to generate
22633 the value. */
22634 newimm = validate_immediate_twopart (value, & highpart);
22635
22636 /* Yes - then make sure that the second instruction is
22637 also an add. */
22638 if (newimm != (unsigned int) FAIL)
22639 newinsn = temp;
22640 /* Still No ? Try using a negated value. */
22641 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22642 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22643 /* Otherwise - give up. */
22644 else
22645 {
22646 as_bad_where (fixP->fx_file, fixP->fx_line,
22647 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22648 (long) value);
22649 break;
22650 }
22651
22652 /* Replace the first operand in the 2nd instruction (which
22653 is the PC) with the destination register. We have
22654 already added in the PC in the first instruction and we
22655 do not want to do it again. */
22656 newinsn &= ~ 0xf0000;
22657 newinsn |= ((newinsn & 0x0f000) << 4);
22658 }
22659
22660 newimm |= (temp & 0xfffff000);
22661 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22662
22663 highpart |= (newinsn & 0xfffff000);
22664 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22665 }
22666 break;
22667
22668 case BFD_RELOC_ARM_OFFSET_IMM:
22669 if (!fixP->fx_done && seg->use_rela_p)
22670 value = 0;
22671
22672 case BFD_RELOC_ARM_LITERAL:
22673 sign = value > 0;
22674
22675 if (value < 0)
22676 value = - value;
22677
22678 if (validate_offset_imm (value, 0) == FAIL)
22679 {
22680 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22681 as_bad_where (fixP->fx_file, fixP->fx_line,
22682 _("invalid literal constant: pool needs to be closer"));
22683 else
22684 as_bad_where (fixP->fx_file, fixP->fx_line,
22685 _("bad immediate value for offset (%ld)"),
22686 (long) value);
22687 break;
22688 }
22689
22690 newval = md_chars_to_number (buf, INSN_SIZE);
22691 if (value == 0)
22692 newval &= 0xfffff000;
22693 else
22694 {
22695 newval &= 0xff7ff000;
22696 newval |= value | (sign ? INDEX_UP : 0);
22697 }
22698 md_number_to_chars (buf, newval, INSN_SIZE);
22699 break;
22700
22701 case BFD_RELOC_ARM_OFFSET_IMM8:
22702 case BFD_RELOC_ARM_HWLITERAL:
22703 sign = value > 0;
22704
22705 if (value < 0)
22706 value = - value;
22707
22708 if (validate_offset_imm (value, 1) == FAIL)
22709 {
22710 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22711 as_bad_where (fixP->fx_file, fixP->fx_line,
22712 _("invalid literal constant: pool needs to be closer"));
22713 else
22714 as_bad_where (fixP->fx_file, fixP->fx_line,
22715 _("bad immediate value for 8-bit offset (%ld)"),
22716 (long) value);
22717 break;
22718 }
22719
22720 newval = md_chars_to_number (buf, INSN_SIZE);
22721 if (value == 0)
22722 newval &= 0xfffff0f0;
22723 else
22724 {
22725 newval &= 0xff7ff0f0;
22726 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22727 }
22728 md_number_to_chars (buf, newval, INSN_SIZE);
22729 break;
22730
22731 case BFD_RELOC_ARM_T32_OFFSET_U8:
22732 if (value < 0 || value > 1020 || value % 4 != 0)
22733 as_bad_where (fixP->fx_file, fixP->fx_line,
22734 _("bad immediate value for offset (%ld)"), (long) value);
22735 value /= 4;
22736
22737 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22738 newval |= value;
22739 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22740 break;
22741
22742 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22743 /* This is a complicated relocation used for all varieties of Thumb32
22744 load/store instruction with immediate offset:
22745
22746 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22747 *4, optional writeback(W)
22748 (doubleword load/store)
22749
22750 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22751 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22752 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22753 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22754 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22755
22756 Uppercase letters indicate bits that are already encoded at
22757 this point. Lowercase letters are our problem. For the
22758 second block of instructions, the secondary opcode nybble
22759 (bits 8..11) is present, and bit 23 is zero, even if this is
22760 a PC-relative operation. */
22761 newval = md_chars_to_number (buf, THUMB_SIZE);
22762 newval <<= 16;
22763 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22764
22765 if ((newval & 0xf0000000) == 0xe0000000)
22766 {
22767 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22768 if (value >= 0)
22769 newval |= (1 << 23);
22770 else
22771 value = -value;
22772 if (value % 4 != 0)
22773 {
22774 as_bad_where (fixP->fx_file, fixP->fx_line,
22775 _("offset not a multiple of 4"));
22776 break;
22777 }
22778 value /= 4;
22779 if (value > 0xff)
22780 {
22781 as_bad_where (fixP->fx_file, fixP->fx_line,
22782 _("offset out of range"));
22783 break;
22784 }
22785 newval &= ~0xff;
22786 }
22787 else if ((newval & 0x000f0000) == 0x000f0000)
22788 {
22789 /* PC-relative, 12-bit offset. */
22790 if (value >= 0)
22791 newval |= (1 << 23);
22792 else
22793 value = -value;
22794 if (value > 0xfff)
22795 {
22796 as_bad_where (fixP->fx_file, fixP->fx_line,
22797 _("offset out of range"));
22798 break;
22799 }
22800 newval &= ~0xfff;
22801 }
22802 else if ((newval & 0x00000100) == 0x00000100)
22803 {
22804 /* Writeback: 8-bit, +/- offset. */
22805 if (value >= 0)
22806 newval |= (1 << 9);
22807 else
22808 value = -value;
22809 if (value > 0xff)
22810 {
22811 as_bad_where (fixP->fx_file, fixP->fx_line,
22812 _("offset out of range"));
22813 break;
22814 }
22815 newval &= ~0xff;
22816 }
22817 else if ((newval & 0x00000f00) == 0x00000e00)
22818 {
22819 /* T-instruction: positive 8-bit offset. */
22820 if (value < 0 || value > 0xff)
22821 {
22822 as_bad_where (fixP->fx_file, fixP->fx_line,
22823 _("offset out of range"));
22824 break;
22825 }
22826 newval &= ~0xff;
22827 newval |= value;
22828 }
22829 else
22830 {
22831 /* Positive 12-bit or negative 8-bit offset. */
22832 int limit;
22833 if (value >= 0)
22834 {
22835 newval |= (1 << 23);
22836 limit = 0xfff;
22837 }
22838 else
22839 {
22840 value = -value;
22841 limit = 0xff;
22842 }
22843 if (value > limit)
22844 {
22845 as_bad_where (fixP->fx_file, fixP->fx_line,
22846 _("offset out of range"));
22847 break;
22848 }
22849 newval &= ~limit;
22850 }
22851
22852 newval |= value;
22853 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22854 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22855 break;
22856
22857 case BFD_RELOC_ARM_SHIFT_IMM:
22858 newval = md_chars_to_number (buf, INSN_SIZE);
22859 if (((unsigned long) value) > 32
22860 || (value == 32
22861 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22862 {
22863 as_bad_where (fixP->fx_file, fixP->fx_line,
22864 _("shift expression is too large"));
22865 break;
22866 }
22867
22868 if (value == 0)
22869 /* Shifts of zero must be done as lsl. */
22870 newval &= ~0x60;
22871 else if (value == 32)
22872 value = 0;
22873 newval &= 0xfffff07f;
22874 newval |= (value & 0x1f) << 7;
22875 md_number_to_chars (buf, newval, INSN_SIZE);
22876 break;
22877
22878 case BFD_RELOC_ARM_T32_IMMEDIATE:
22879 case BFD_RELOC_ARM_T32_ADD_IMM:
22880 case BFD_RELOC_ARM_T32_IMM12:
22881 case BFD_RELOC_ARM_T32_ADD_PC12:
22882 /* We claim that this fixup has been processed here,
22883 even if in fact we generate an error because we do
22884 not have a reloc for it, so tc_gen_reloc will reject it. */
22885 fixP->fx_done = 1;
22886
22887 if (fixP->fx_addsy
22888 && ! S_IS_DEFINED (fixP->fx_addsy))
22889 {
22890 as_bad_where (fixP->fx_file, fixP->fx_line,
22891 _("undefined symbol %s used as an immediate value"),
22892 S_GET_NAME (fixP->fx_addsy));
22893 break;
22894 }
22895
22896 newval = md_chars_to_number (buf, THUMB_SIZE);
22897 newval <<= 16;
22898 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22899
22900 newimm = FAIL;
22901 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22902 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22903 {
22904 newimm = encode_thumb32_immediate (value);
22905 if (newimm == (unsigned int) FAIL)
22906 newimm = thumb32_negate_data_op (&newval, value);
22907 }
22908 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22909 && newimm == (unsigned int) FAIL)
22910 {
22911 /* Turn add/sum into addw/subw. */
22912 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22913 newval = (newval & 0xfeffffff) | 0x02000000;
22914 /* No flat 12-bit imm encoding for addsw/subsw. */
22915 if ((newval & 0x00100000) == 0)
22916 {
22917 /* 12 bit immediate for addw/subw. */
22918 if (value < 0)
22919 {
22920 value = -value;
22921 newval ^= 0x00a00000;
22922 }
22923 if (value > 0xfff)
22924 newimm = (unsigned int) FAIL;
22925 else
22926 newimm = value;
22927 }
22928 }
22929
22930 if (newimm == (unsigned int)FAIL)
22931 {
22932 as_bad_where (fixP->fx_file, fixP->fx_line,
22933 _("invalid constant (%lx) after fixup"),
22934 (unsigned long) value);
22935 break;
22936 }
22937
22938 newval |= (newimm & 0x800) << 15;
22939 newval |= (newimm & 0x700) << 4;
22940 newval |= (newimm & 0x0ff);
22941
22942 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22943 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22944 break;
22945
22946 case BFD_RELOC_ARM_SMC:
22947 if (((unsigned long) value) > 0xffff)
22948 as_bad_where (fixP->fx_file, fixP->fx_line,
22949 _("invalid smc expression"));
22950 newval = md_chars_to_number (buf, INSN_SIZE);
22951 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22952 md_number_to_chars (buf, newval, INSN_SIZE);
22953 break;
22954
22955 case BFD_RELOC_ARM_HVC:
22956 if (((unsigned long) value) > 0xffff)
22957 as_bad_where (fixP->fx_file, fixP->fx_line,
22958 _("invalid hvc expression"));
22959 newval = md_chars_to_number (buf, INSN_SIZE);
22960 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22961 md_number_to_chars (buf, newval, INSN_SIZE);
22962 break;
22963
22964 case BFD_RELOC_ARM_SWI:
22965 if (fixP->tc_fix_data != 0)
22966 {
22967 if (((unsigned long) value) > 0xff)
22968 as_bad_where (fixP->fx_file, fixP->fx_line,
22969 _("invalid swi expression"));
22970 newval = md_chars_to_number (buf, THUMB_SIZE);
22971 newval |= value;
22972 md_number_to_chars (buf, newval, THUMB_SIZE);
22973 }
22974 else
22975 {
22976 if (((unsigned long) value) > 0x00ffffff)
22977 as_bad_where (fixP->fx_file, fixP->fx_line,
22978 _("invalid swi expression"));
22979 newval = md_chars_to_number (buf, INSN_SIZE);
22980 newval |= value;
22981 md_number_to_chars (buf, newval, INSN_SIZE);
22982 }
22983 break;
22984
22985 case BFD_RELOC_ARM_MULTI:
22986 if (((unsigned long) value) > 0xffff)
22987 as_bad_where (fixP->fx_file, fixP->fx_line,
22988 _("invalid expression in load/store multiple"));
22989 newval = value | md_chars_to_number (buf, INSN_SIZE);
22990 md_number_to_chars (buf, newval, INSN_SIZE);
22991 break;
22992
22993 #ifdef OBJ_ELF
22994 case BFD_RELOC_ARM_PCREL_CALL:
22995
22996 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22997 && fixP->fx_addsy
22998 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22999 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23000 && THUMB_IS_FUNC (fixP->fx_addsy))
23001 /* Flip the bl to blx. This is a simple flip
23002 bit here because we generate PCREL_CALL for
23003 unconditional bls. */
23004 {
23005 newval = md_chars_to_number (buf, INSN_SIZE);
23006 newval = newval | 0x10000000;
23007 md_number_to_chars (buf, newval, INSN_SIZE);
23008 temp = 1;
23009 fixP->fx_done = 1;
23010 }
23011 else
23012 temp = 3;
23013 goto arm_branch_common;
23014
23015 case BFD_RELOC_ARM_PCREL_JUMP:
23016 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23017 && fixP->fx_addsy
23018 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23019 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23020 && THUMB_IS_FUNC (fixP->fx_addsy))
23021 {
23022 /* This would map to a bl<cond>, b<cond>,
23023 b<always> to a Thumb function. We
23024 need to force a relocation for this particular
23025 case. */
23026 newval = md_chars_to_number (buf, INSN_SIZE);
23027 fixP->fx_done = 0;
23028 }
23029
23030 case BFD_RELOC_ARM_PLT32:
23031 #endif
23032 case BFD_RELOC_ARM_PCREL_BRANCH:
23033 temp = 3;
23034 goto arm_branch_common;
23035
23036 case BFD_RELOC_ARM_PCREL_BLX:
23037
23038 temp = 1;
23039 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23040 && fixP->fx_addsy
23041 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23042 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23043 && ARM_IS_FUNC (fixP->fx_addsy))
23044 {
23045 /* Flip the blx to a bl and warn. */
23046 const char *name = S_GET_NAME (fixP->fx_addsy);
23047 newval = 0xeb000000;
23048 as_warn_where (fixP->fx_file, fixP->fx_line,
23049 _("blx to '%s' an ARM ISA state function changed to bl"),
23050 name);
23051 md_number_to_chars (buf, newval, INSN_SIZE);
23052 temp = 3;
23053 fixP->fx_done = 1;
23054 }
23055
23056 #ifdef OBJ_ELF
23057 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23058 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23059 #endif
23060
23061 arm_branch_common:
23062 /* We are going to store value (shifted right by two) in the
23063 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23064 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23065 also be be clear. */
23066 if (value & temp)
23067 as_bad_where (fixP->fx_file, fixP->fx_line,
23068 _("misaligned branch destination"));
23069 if ((value & (offsetT)0xfe000000) != (offsetT)0
23070 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23071 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23072
23073 if (fixP->fx_done || !seg->use_rela_p)
23074 {
23075 newval = md_chars_to_number (buf, INSN_SIZE);
23076 newval |= (value >> 2) & 0x00ffffff;
23077 /* Set the H bit on BLX instructions. */
23078 if (temp == 1)
23079 {
23080 if (value & 2)
23081 newval |= 0x01000000;
23082 else
23083 newval &= ~0x01000000;
23084 }
23085 md_number_to_chars (buf, newval, INSN_SIZE);
23086 }
23087 break;
23088
23089 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23090 /* CBZ can only branch forward. */
23091
23092 /* Attempts to use CBZ to branch to the next instruction
23093 (which, strictly speaking, are prohibited) will be turned into
23094 no-ops.
23095
23096 FIXME: It may be better to remove the instruction completely and
23097 perform relaxation. */
23098 if (value == -2)
23099 {
23100 newval = md_chars_to_number (buf, THUMB_SIZE);
23101 newval = 0xbf00; /* NOP encoding T1 */
23102 md_number_to_chars (buf, newval, THUMB_SIZE);
23103 }
23104 else
23105 {
23106 if (value & ~0x7e)
23107 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23108
23109 if (fixP->fx_done || !seg->use_rela_p)
23110 {
23111 newval = md_chars_to_number (buf, THUMB_SIZE);
23112 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23113 md_number_to_chars (buf, newval, THUMB_SIZE);
23114 }
23115 }
23116 break;
23117
23118 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23119 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23120 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23121
23122 if (fixP->fx_done || !seg->use_rela_p)
23123 {
23124 newval = md_chars_to_number (buf, THUMB_SIZE);
23125 newval |= (value & 0x1ff) >> 1;
23126 md_number_to_chars (buf, newval, THUMB_SIZE);
23127 }
23128 break;
23129
23130 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23131 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23132 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23133
23134 if (fixP->fx_done || !seg->use_rela_p)
23135 {
23136 newval = md_chars_to_number (buf, THUMB_SIZE);
23137 newval |= (value & 0xfff) >> 1;
23138 md_number_to_chars (buf, newval, THUMB_SIZE);
23139 }
23140 break;
23141
23142 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23143 if (fixP->fx_addsy
23144 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23145 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23146 && ARM_IS_FUNC (fixP->fx_addsy)
23147 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23148 {
23149 /* Force a relocation for a branch 20 bits wide. */
23150 fixP->fx_done = 0;
23151 }
23152 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23153 as_bad_where (fixP->fx_file, fixP->fx_line,
23154 _("conditional branch out of range"));
23155
23156 if (fixP->fx_done || !seg->use_rela_p)
23157 {
23158 offsetT newval2;
23159 addressT S, J1, J2, lo, hi;
23160
23161 S = (value & 0x00100000) >> 20;
23162 J2 = (value & 0x00080000) >> 19;
23163 J1 = (value & 0x00040000) >> 18;
23164 hi = (value & 0x0003f000) >> 12;
23165 lo = (value & 0x00000ffe) >> 1;
23166
23167 newval = md_chars_to_number (buf, THUMB_SIZE);
23168 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23169 newval |= (S << 10) | hi;
23170 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23171 md_number_to_chars (buf, newval, THUMB_SIZE);
23172 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23173 }
23174 break;
23175
23176 case BFD_RELOC_THUMB_PCREL_BLX:
23177 /* If there is a blx from a thumb state function to
23178 another thumb function flip this to a bl and warn
23179 about it. */
23180
23181 if (fixP->fx_addsy
23182 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23183 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23184 && THUMB_IS_FUNC (fixP->fx_addsy))
23185 {
23186 const char *name = S_GET_NAME (fixP->fx_addsy);
23187 as_warn_where (fixP->fx_file, fixP->fx_line,
23188 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23189 name);
23190 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23191 newval = newval | 0x1000;
23192 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23193 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23194 fixP->fx_done = 1;
23195 }
23196
23197
23198 goto thumb_bl_common;
23199
23200 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23201 /* A bl from Thumb state ISA to an internal ARM state function
23202 is converted to a blx. */
23203 if (fixP->fx_addsy
23204 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23205 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23206 && ARM_IS_FUNC (fixP->fx_addsy)
23207 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23208 {
23209 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23210 newval = newval & ~0x1000;
23211 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23212 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23213 fixP->fx_done = 1;
23214 }
23215
23216 thumb_bl_common:
23217
23218 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23219 /* For a BLX instruction, make sure that the relocation is rounded up
23220 to a word boundary. This follows the semantics of the instruction
23221 which specifies that bit 1 of the target address will come from bit
23222 1 of the base address. */
23223 value = (value + 3) & ~ 3;
23224
23225 #ifdef OBJ_ELF
23226 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23227 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23228 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23229 #endif
23230
23231 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23232 {
23233 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23234 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23235 else if ((value & ~0x1ffffff)
23236 && ((value & ~0x1ffffff) != ~0x1ffffff))
23237 as_bad_where (fixP->fx_file, fixP->fx_line,
23238 _("Thumb2 branch out of range"));
23239 }
23240
23241 if (fixP->fx_done || !seg->use_rela_p)
23242 encode_thumb2_b_bl_offset (buf, value);
23243
23244 break;
23245
23246 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23247 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23248 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23249
23250 if (fixP->fx_done || !seg->use_rela_p)
23251 encode_thumb2_b_bl_offset (buf, value);
23252
23253 break;
23254
23255 case BFD_RELOC_8:
23256 if (fixP->fx_done || !seg->use_rela_p)
23257 *buf = value;
23258 break;
23259
23260 case BFD_RELOC_16:
23261 if (fixP->fx_done || !seg->use_rela_p)
23262 md_number_to_chars (buf, value, 2);
23263 break;
23264
23265 #ifdef OBJ_ELF
23266 case BFD_RELOC_ARM_TLS_CALL:
23267 case BFD_RELOC_ARM_THM_TLS_CALL:
23268 case BFD_RELOC_ARM_TLS_DESCSEQ:
23269 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23270 case BFD_RELOC_ARM_TLS_GOTDESC:
23271 case BFD_RELOC_ARM_TLS_GD32:
23272 case BFD_RELOC_ARM_TLS_LE32:
23273 case BFD_RELOC_ARM_TLS_IE32:
23274 case BFD_RELOC_ARM_TLS_LDM32:
23275 case BFD_RELOC_ARM_TLS_LDO32:
23276 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23277 break;
23278
23279 case BFD_RELOC_ARM_GOT32:
23280 case BFD_RELOC_ARM_GOTOFF:
23281 break;
23282
23283 case BFD_RELOC_ARM_GOT_PREL:
23284 if (fixP->fx_done || !seg->use_rela_p)
23285 md_number_to_chars (buf, value, 4);
23286 break;
23287
23288 case BFD_RELOC_ARM_TARGET2:
23289 /* TARGET2 is not partial-inplace, so we need to write the
23290 addend here for REL targets, because it won't be written out
23291 during reloc processing later. */
23292 if (fixP->fx_done || !seg->use_rela_p)
23293 md_number_to_chars (buf, fixP->fx_offset, 4);
23294 break;
23295 #endif
23296
23297 case BFD_RELOC_RVA:
23298 case BFD_RELOC_32:
23299 case BFD_RELOC_ARM_TARGET1:
23300 case BFD_RELOC_ARM_ROSEGREL32:
23301 case BFD_RELOC_ARM_SBREL32:
23302 case BFD_RELOC_32_PCREL:
23303 #ifdef TE_PE
23304 case BFD_RELOC_32_SECREL:
23305 #endif
23306 if (fixP->fx_done || !seg->use_rela_p)
23307 #ifdef TE_WINCE
23308 /* For WinCE we only do this for pcrel fixups. */
23309 if (fixP->fx_done || fixP->fx_pcrel)
23310 #endif
23311 md_number_to_chars (buf, value, 4);
23312 break;
23313
23314 #ifdef OBJ_ELF
23315 case BFD_RELOC_ARM_PREL31:
23316 if (fixP->fx_done || !seg->use_rela_p)
23317 {
23318 newval = md_chars_to_number (buf, 4) & 0x80000000;
23319 if ((value ^ (value >> 1)) & 0x40000000)
23320 {
23321 as_bad_where (fixP->fx_file, fixP->fx_line,
23322 _("rel31 relocation overflow"));
23323 }
23324 newval |= value & 0x7fffffff;
23325 md_number_to_chars (buf, newval, 4);
23326 }
23327 break;
23328 #endif
23329
23330 case BFD_RELOC_ARM_CP_OFF_IMM:
23331 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23332 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23333 newval = md_chars_to_number (buf, INSN_SIZE);
23334 else
23335 newval = get_thumb32_insn (buf);
23336 if ((newval & 0x0f200f00) == 0x0d000900)
23337 {
23338 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23339 has permitted values that are multiples of 2, in the range 0
23340 to 510. */
23341 if (value < -510 || value > 510 || (value & 1))
23342 as_bad_where (fixP->fx_file, fixP->fx_line,
23343 _("co-processor offset out of range"));
23344 }
23345 else if (value < -1023 || value > 1023 || (value & 3))
23346 as_bad_where (fixP->fx_file, fixP->fx_line,
23347 _("co-processor offset out of range"));
23348 cp_off_common:
23349 sign = value > 0;
23350 if (value < 0)
23351 value = -value;
23352 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23353 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23354 newval = md_chars_to_number (buf, INSN_SIZE);
23355 else
23356 newval = get_thumb32_insn (buf);
23357 if (value == 0)
23358 newval &= 0xffffff00;
23359 else
23360 {
23361 newval &= 0xff7fff00;
23362 if ((newval & 0x0f200f00) == 0x0d000900)
23363 {
23364 /* This is a fp16 vstr/vldr.
23365
23366 It requires the immediate offset in the instruction is shifted
23367 left by 1 to be a half-word offset.
23368
23369 Here, left shift by 1 first, and later right shift by 2
23370 should get the right offset. */
23371 value <<= 1;
23372 }
23373 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23374 }
23375 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23376 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23377 md_number_to_chars (buf, newval, INSN_SIZE);
23378 else
23379 put_thumb32_insn (buf, newval);
23380 break;
23381
23382 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23383 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23384 if (value < -255 || value > 255)
23385 as_bad_where (fixP->fx_file, fixP->fx_line,
23386 _("co-processor offset out of range"));
23387 value *= 4;
23388 goto cp_off_common;
23389
23390 case BFD_RELOC_ARM_THUMB_OFFSET:
23391 newval = md_chars_to_number (buf, THUMB_SIZE);
23392 /* Exactly what ranges, and where the offset is inserted depends
23393 on the type of instruction, we can establish this from the
23394 top 4 bits. */
23395 switch (newval >> 12)
23396 {
23397 case 4: /* PC load. */
23398 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23399 forced to zero for these loads; md_pcrel_from has already
23400 compensated for this. */
23401 if (value & 3)
23402 as_bad_where (fixP->fx_file, fixP->fx_line,
23403 _("invalid offset, target not word aligned (0x%08lX)"),
23404 (((unsigned long) fixP->fx_frag->fr_address
23405 + (unsigned long) fixP->fx_where) & ~3)
23406 + (unsigned long) value);
23407
23408 if (value & ~0x3fc)
23409 as_bad_where (fixP->fx_file, fixP->fx_line,
23410 _("invalid offset, value too big (0x%08lX)"),
23411 (long) value);
23412
23413 newval |= value >> 2;
23414 break;
23415
23416 case 9: /* SP load/store. */
23417 if (value & ~0x3fc)
23418 as_bad_where (fixP->fx_file, fixP->fx_line,
23419 _("invalid offset, value too big (0x%08lX)"),
23420 (long) value);
23421 newval |= value >> 2;
23422 break;
23423
23424 case 6: /* Word load/store. */
23425 if (value & ~0x7c)
23426 as_bad_where (fixP->fx_file, fixP->fx_line,
23427 _("invalid offset, value too big (0x%08lX)"),
23428 (long) value);
23429 newval |= value << 4; /* 6 - 2. */
23430 break;
23431
23432 case 7: /* Byte load/store. */
23433 if (value & ~0x1f)
23434 as_bad_where (fixP->fx_file, fixP->fx_line,
23435 _("invalid offset, value too big (0x%08lX)"),
23436 (long) value);
23437 newval |= value << 6;
23438 break;
23439
23440 case 8: /* Halfword load/store. */
23441 if (value & ~0x3e)
23442 as_bad_where (fixP->fx_file, fixP->fx_line,
23443 _("invalid offset, value too big (0x%08lX)"),
23444 (long) value);
23445 newval |= value << 5; /* 6 - 1. */
23446 break;
23447
23448 default:
23449 as_bad_where (fixP->fx_file, fixP->fx_line,
23450 "Unable to process relocation for thumb opcode: %lx",
23451 (unsigned long) newval);
23452 break;
23453 }
23454 md_number_to_chars (buf, newval, THUMB_SIZE);
23455 break;
23456
23457 case BFD_RELOC_ARM_THUMB_ADD:
23458 /* This is a complicated relocation, since we use it for all of
23459 the following immediate relocations:
23460
23461 3bit ADD/SUB
23462 8bit ADD/SUB
23463 9bit ADD/SUB SP word-aligned
23464 10bit ADD PC/SP word-aligned
23465
23466 The type of instruction being processed is encoded in the
23467 instruction field:
23468
23469 0x8000 SUB
23470 0x00F0 Rd
23471 0x000F Rs
23472 */
23473 newval = md_chars_to_number (buf, THUMB_SIZE);
23474 {
23475 int rd = (newval >> 4) & 0xf;
23476 int rs = newval & 0xf;
23477 int subtract = !!(newval & 0x8000);
23478
23479 /* Check for HI regs, only very restricted cases allowed:
23480 Adjusting SP, and using PC or SP to get an address. */
23481 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23482 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23483 as_bad_where (fixP->fx_file, fixP->fx_line,
23484 _("invalid Hi register with immediate"));
23485
23486 /* If value is negative, choose the opposite instruction. */
23487 if (value < 0)
23488 {
23489 value = -value;
23490 subtract = !subtract;
23491 if (value < 0)
23492 as_bad_where (fixP->fx_file, fixP->fx_line,
23493 _("immediate value out of range"));
23494 }
23495
23496 if (rd == REG_SP)
23497 {
23498 if (value & ~0x1fc)
23499 as_bad_where (fixP->fx_file, fixP->fx_line,
23500 _("invalid immediate for stack address calculation"));
23501 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23502 newval |= value >> 2;
23503 }
23504 else if (rs == REG_PC || rs == REG_SP)
23505 {
23506 /* PR gas/18541. If the addition is for a defined symbol
23507 within range of an ADR instruction then accept it. */
23508 if (subtract
23509 && value == 4
23510 && fixP->fx_addsy != NULL)
23511 {
23512 subtract = 0;
23513
23514 if (! S_IS_DEFINED (fixP->fx_addsy)
23515 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23516 || S_IS_WEAK (fixP->fx_addsy))
23517 {
23518 as_bad_where (fixP->fx_file, fixP->fx_line,
23519 _("address calculation needs a strongly defined nearby symbol"));
23520 }
23521 else
23522 {
23523 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23524
23525 /* Round up to the next 4-byte boundary. */
23526 if (v & 3)
23527 v = (v + 3) & ~ 3;
23528 else
23529 v += 4;
23530 v = S_GET_VALUE (fixP->fx_addsy) - v;
23531
23532 if (v & ~0x3fc)
23533 {
23534 as_bad_where (fixP->fx_file, fixP->fx_line,
23535 _("symbol too far away"));
23536 }
23537 else
23538 {
23539 fixP->fx_done = 1;
23540 value = v;
23541 }
23542 }
23543 }
23544
23545 if (subtract || value & ~0x3fc)
23546 as_bad_where (fixP->fx_file, fixP->fx_line,
23547 _("invalid immediate for address calculation (value = 0x%08lX)"),
23548 (unsigned long) (subtract ? - value : value));
23549 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23550 newval |= rd << 8;
23551 newval |= value >> 2;
23552 }
23553 else if (rs == rd)
23554 {
23555 if (value & ~0xff)
23556 as_bad_where (fixP->fx_file, fixP->fx_line,
23557 _("immediate value out of range"));
23558 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23559 newval |= (rd << 8) | value;
23560 }
23561 else
23562 {
23563 if (value & ~0x7)
23564 as_bad_where (fixP->fx_file, fixP->fx_line,
23565 _("immediate value out of range"));
23566 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23567 newval |= rd | (rs << 3) | (value << 6);
23568 }
23569 }
23570 md_number_to_chars (buf, newval, THUMB_SIZE);
23571 break;
23572
23573 case BFD_RELOC_ARM_THUMB_IMM:
23574 newval = md_chars_to_number (buf, THUMB_SIZE);
23575 if (value < 0 || value > 255)
23576 as_bad_where (fixP->fx_file, fixP->fx_line,
23577 _("invalid immediate: %ld is out of range"),
23578 (long) value);
23579 newval |= value;
23580 md_number_to_chars (buf, newval, THUMB_SIZE);
23581 break;
23582
23583 case BFD_RELOC_ARM_THUMB_SHIFT:
23584 /* 5bit shift value (0..32). LSL cannot take 32. */
23585 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23586 temp = newval & 0xf800;
23587 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23588 as_bad_where (fixP->fx_file, fixP->fx_line,
23589 _("invalid shift value: %ld"), (long) value);
23590 /* Shifts of zero must be encoded as LSL. */
23591 if (value == 0)
23592 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23593 /* Shifts of 32 are encoded as zero. */
23594 else if (value == 32)
23595 value = 0;
23596 newval |= value << 6;
23597 md_number_to_chars (buf, newval, THUMB_SIZE);
23598 break;
23599
23600 case BFD_RELOC_VTABLE_INHERIT:
23601 case BFD_RELOC_VTABLE_ENTRY:
23602 fixP->fx_done = 0;
23603 return;
23604
23605 case BFD_RELOC_ARM_MOVW:
23606 case BFD_RELOC_ARM_MOVT:
23607 case BFD_RELOC_ARM_THUMB_MOVW:
23608 case BFD_RELOC_ARM_THUMB_MOVT:
23609 if (fixP->fx_done || !seg->use_rela_p)
23610 {
23611 /* REL format relocations are limited to a 16-bit addend. */
23612 if (!fixP->fx_done)
23613 {
23614 if (value < -0x8000 || value > 0x7fff)
23615 as_bad_where (fixP->fx_file, fixP->fx_line,
23616 _("offset out of range"));
23617 }
23618 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23619 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23620 {
23621 value >>= 16;
23622 }
23623
23624 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23625 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23626 {
23627 newval = get_thumb32_insn (buf);
23628 newval &= 0xfbf08f00;
23629 newval |= (value & 0xf000) << 4;
23630 newval |= (value & 0x0800) << 15;
23631 newval |= (value & 0x0700) << 4;
23632 newval |= (value & 0x00ff);
23633 put_thumb32_insn (buf, newval);
23634 }
23635 else
23636 {
23637 newval = md_chars_to_number (buf, 4);
23638 newval &= 0xfff0f000;
23639 newval |= value & 0x0fff;
23640 newval |= (value & 0xf000) << 4;
23641 md_number_to_chars (buf, newval, 4);
23642 }
23643 }
23644 return;
23645
23646 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23647 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23648 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23649 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23650 gas_assert (!fixP->fx_done);
23651 {
23652 bfd_vma insn;
23653 bfd_boolean is_mov;
23654 bfd_vma encoded_addend = value;
23655
23656 /* Check that addend can be encoded in instruction. */
23657 if (!seg->use_rela_p && (value < 0 || value > 255))
23658 as_bad_where (fixP->fx_file, fixP->fx_line,
23659 _("the offset 0x%08lX is not representable"),
23660 (unsigned long) encoded_addend);
23661
23662 /* Extract the instruction. */
23663 insn = md_chars_to_number (buf, THUMB_SIZE);
23664 is_mov = (insn & 0xf800) == 0x2000;
23665
23666 /* Encode insn. */
23667 if (is_mov)
23668 {
23669 if (!seg->use_rela_p)
23670 insn |= encoded_addend;
23671 }
23672 else
23673 {
23674 int rd, rs;
23675
23676 /* Extract the instruction. */
23677 /* Encoding is the following
23678 0x8000 SUB
23679 0x00F0 Rd
23680 0x000F Rs
23681 */
23682 /* The following conditions must be true :
23683 - ADD
23684 - Rd == Rs
23685 - Rd <= 7
23686 */
23687 rd = (insn >> 4) & 0xf;
23688 rs = insn & 0xf;
23689 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23690 as_bad_where (fixP->fx_file, fixP->fx_line,
23691 _("Unable to process relocation for thumb opcode: %lx"),
23692 (unsigned long) insn);
23693
23694 /* Encode as ADD immediate8 thumb 1 code. */
23695 insn = 0x3000 | (rd << 8);
23696
23697 /* Place the encoded addend into the first 8 bits of the
23698 instruction. */
23699 if (!seg->use_rela_p)
23700 insn |= encoded_addend;
23701 }
23702
23703 /* Update the instruction. */
23704 md_number_to_chars (buf, insn, THUMB_SIZE);
23705 }
23706 break;
23707
23708 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23709 case BFD_RELOC_ARM_ALU_PC_G0:
23710 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23711 case BFD_RELOC_ARM_ALU_PC_G1:
23712 case BFD_RELOC_ARM_ALU_PC_G2:
23713 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23714 case BFD_RELOC_ARM_ALU_SB_G0:
23715 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23716 case BFD_RELOC_ARM_ALU_SB_G1:
23717 case BFD_RELOC_ARM_ALU_SB_G2:
23718 gas_assert (!fixP->fx_done);
23719 if (!seg->use_rela_p)
23720 {
23721 bfd_vma insn;
23722 bfd_vma encoded_addend;
23723 bfd_vma addend_abs = abs (value);
23724
23725 /* Check that the absolute value of the addend can be
23726 expressed as an 8-bit constant plus a rotation. */
23727 encoded_addend = encode_arm_immediate (addend_abs);
23728 if (encoded_addend == (unsigned int) FAIL)
23729 as_bad_where (fixP->fx_file, fixP->fx_line,
23730 _("the offset 0x%08lX is not representable"),
23731 (unsigned long) addend_abs);
23732
23733 /* Extract the instruction. */
23734 insn = md_chars_to_number (buf, INSN_SIZE);
23735
23736 /* If the addend is positive, use an ADD instruction.
23737 Otherwise use a SUB. Take care not to destroy the S bit. */
23738 insn &= 0xff1fffff;
23739 if (value < 0)
23740 insn |= 1 << 22;
23741 else
23742 insn |= 1 << 23;
23743
23744 /* Place the encoded addend into the first 12 bits of the
23745 instruction. */
23746 insn &= 0xfffff000;
23747 insn |= encoded_addend;
23748
23749 /* Update the instruction. */
23750 md_number_to_chars (buf, insn, INSN_SIZE);
23751 }
23752 break;
23753
23754 case BFD_RELOC_ARM_LDR_PC_G0:
23755 case BFD_RELOC_ARM_LDR_PC_G1:
23756 case BFD_RELOC_ARM_LDR_PC_G2:
23757 case BFD_RELOC_ARM_LDR_SB_G0:
23758 case BFD_RELOC_ARM_LDR_SB_G1:
23759 case BFD_RELOC_ARM_LDR_SB_G2:
23760 gas_assert (!fixP->fx_done);
23761 if (!seg->use_rela_p)
23762 {
23763 bfd_vma insn;
23764 bfd_vma addend_abs = abs (value);
23765
23766 /* Check that the absolute value of the addend can be
23767 encoded in 12 bits. */
23768 if (addend_abs >= 0x1000)
23769 as_bad_where (fixP->fx_file, fixP->fx_line,
23770 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23771 (unsigned long) addend_abs);
23772
23773 /* Extract the instruction. */
23774 insn = md_chars_to_number (buf, INSN_SIZE);
23775
23776 /* If the addend is negative, clear bit 23 of the instruction.
23777 Otherwise set it. */
23778 if (value < 0)
23779 insn &= ~(1 << 23);
23780 else
23781 insn |= 1 << 23;
23782
23783 /* Place the absolute value of the addend into the first 12 bits
23784 of the instruction. */
23785 insn &= 0xfffff000;
23786 insn |= addend_abs;
23787
23788 /* Update the instruction. */
23789 md_number_to_chars (buf, insn, INSN_SIZE);
23790 }
23791 break;
23792
23793 case BFD_RELOC_ARM_LDRS_PC_G0:
23794 case BFD_RELOC_ARM_LDRS_PC_G1:
23795 case BFD_RELOC_ARM_LDRS_PC_G2:
23796 case BFD_RELOC_ARM_LDRS_SB_G0:
23797 case BFD_RELOC_ARM_LDRS_SB_G1:
23798 case BFD_RELOC_ARM_LDRS_SB_G2:
23799 gas_assert (!fixP->fx_done);
23800 if (!seg->use_rela_p)
23801 {
23802 bfd_vma insn;
23803 bfd_vma addend_abs = abs (value);
23804
23805 /* Check that the absolute value of the addend can be
23806 encoded in 8 bits. */
23807 if (addend_abs >= 0x100)
23808 as_bad_where (fixP->fx_file, fixP->fx_line,
23809 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23810 (unsigned long) addend_abs);
23811
23812 /* Extract the instruction. */
23813 insn = md_chars_to_number (buf, INSN_SIZE);
23814
23815 /* If the addend is negative, clear bit 23 of the instruction.
23816 Otherwise set it. */
23817 if (value < 0)
23818 insn &= ~(1 << 23);
23819 else
23820 insn |= 1 << 23;
23821
23822 /* Place the first four bits of the absolute value of the addend
23823 into the first 4 bits of the instruction, and the remaining
23824 four into bits 8 .. 11. */
23825 insn &= 0xfffff0f0;
23826 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23827
23828 /* Update the instruction. */
23829 md_number_to_chars (buf, insn, INSN_SIZE);
23830 }
23831 break;
23832
23833 case BFD_RELOC_ARM_LDC_PC_G0:
23834 case BFD_RELOC_ARM_LDC_PC_G1:
23835 case BFD_RELOC_ARM_LDC_PC_G2:
23836 case BFD_RELOC_ARM_LDC_SB_G0:
23837 case BFD_RELOC_ARM_LDC_SB_G1:
23838 case BFD_RELOC_ARM_LDC_SB_G2:
23839 gas_assert (!fixP->fx_done);
23840 if (!seg->use_rela_p)
23841 {
23842 bfd_vma insn;
23843 bfd_vma addend_abs = abs (value);
23844
23845 /* Check that the absolute value of the addend is a multiple of
23846 four and, when divided by four, fits in 8 bits. */
23847 if (addend_abs & 0x3)
23848 as_bad_where (fixP->fx_file, fixP->fx_line,
23849 _("bad offset 0x%08lX (must be word-aligned)"),
23850 (unsigned long) addend_abs);
23851
23852 if ((addend_abs >> 2) > 0xff)
23853 as_bad_where (fixP->fx_file, fixP->fx_line,
23854 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23855 (unsigned long) addend_abs);
23856
23857 /* Extract the instruction. */
23858 insn = md_chars_to_number (buf, INSN_SIZE);
23859
23860 /* If the addend is negative, clear bit 23 of the instruction.
23861 Otherwise set it. */
23862 if (value < 0)
23863 insn &= ~(1 << 23);
23864 else
23865 insn |= 1 << 23;
23866
23867 /* Place the addend (divided by four) into the first eight
23868 bits of the instruction. */
23869 insn &= 0xfffffff0;
23870 insn |= addend_abs >> 2;
23871
23872 /* Update the instruction. */
23873 md_number_to_chars (buf, insn, INSN_SIZE);
23874 }
23875 break;
23876
23877 case BFD_RELOC_ARM_V4BX:
23878 /* This will need to go in the object file. */
23879 fixP->fx_done = 0;
23880 break;
23881
23882 case BFD_RELOC_UNUSED:
23883 default:
23884 as_bad_where (fixP->fx_file, fixP->fx_line,
23885 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23886 }
23887 }
23888
23889 /* Translate internal representation of relocation info to BFD target
23890 format. */
23891
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code.  Note
     that the first group of cases below deliberately falls through:
     a non-PC-relative BFD_RELOC_8/16/32/MOVW/... drops down the chain
     until it reaches the big "code = fixp->fx_r_type" case.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These map directly onto the identically-named BFD code.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* For EABI v4 and later a BLX to an undefined symbol is turned
	 into a BL, so emit a BRANCH23 relocation instead.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      /* ELF-only relocations that pass straight through.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only relocation types should have been resolved by
	   md_apply_fix; report which one leaked through.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     relocation whose addend is the location of the reference.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24151
24152 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24153
24154 void
24155 cons_fix_new_arm (fragS * frag,
24156 int where,
24157 int size,
24158 expressionS * exp,
24159 bfd_reloc_code_real_type reloc)
24160 {
24161 int pcrel = 0;
24162
24163 /* Pick a reloc.
24164 FIXME: @@ Should look at CPU word size. */
24165 switch (size)
24166 {
24167 case 1:
24168 reloc = BFD_RELOC_8;
24169 break;
24170 case 2:
24171 reloc = BFD_RELOC_16;
24172 break;
24173 case 4:
24174 default:
24175 reloc = BFD_RELOC_32;
24176 break;
24177 case 8:
24178 reloc = BFD_RELOC_64;
24179 break;
24180 }
24181
24182 #ifdef TE_PE
24183 if (exp->X_op == O_secrel)
24184 {
24185 exp->X_op = O_symbol;
24186 reloc = BFD_RELOC_32_SECREL;
24187 }
24188 #endif
24189
24190 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24191 }
24192
24193 #if defined (OBJ_COFF)
24194 void
24195 arm_validate_fix (fixS * fixP)
24196 {
24197 /* If the destination of the branch is a defined symbol which does not have
24198 the THUMB_FUNC attribute, then we must be calling a function which has
24199 the (interfacearm) attribute. We look for the Thumb entry point to that
24200 function and change the branch to refer to that function instead. */
24201 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24202 && fixP->fx_addsy != NULL
24203 && S_IS_DEFINED (fixP->fx_addsy)
24204 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24205 {
24206 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24207 }
24208 }
24209 #endif
24210
24211
24212 int
24213 arm_force_relocation (struct fix * fixp)
24214 {
24215 #if defined (OBJ_COFF) && defined (TE_PE)
24216 if (fixp->fx_r_type == BFD_RELOC_RVA)
24217 return 1;
24218 #endif
24219
24220 /* In case we have a call or a branch to a function in ARM ISA mode from
24221 a thumb function or vice-versa force the relocation. These relocations
24222 are cleared off for some cores that might have blx and simple transformations
24223 are possible. */
24224
24225 #ifdef OBJ_ELF
24226 switch (fixp->fx_r_type)
24227 {
24228 case BFD_RELOC_ARM_PCREL_JUMP:
24229 case BFD_RELOC_ARM_PCREL_CALL:
24230 case BFD_RELOC_THUMB_PCREL_BLX:
24231 if (THUMB_IS_FUNC (fixp->fx_addsy))
24232 return 1;
24233 break;
24234
24235 case BFD_RELOC_ARM_PCREL_BLX:
24236 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24237 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24238 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24239 if (ARM_IS_FUNC (fixp->fx_addsy))
24240 return 1;
24241 break;
24242
24243 default:
24244 break;
24245 }
24246 #endif
24247
24248 /* Resolve these relocations even if the symbol is extern or weak.
24249 Technically this is probably wrong due to symbol preemption.
24250 In practice these relocations do not have enough range to be useful
24251 at dynamic link time, and some code (e.g. in the Linux kernel)
24252 expects these references to be resolved. */
24253 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24254 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24255 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24256 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24257 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24258 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24259 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24260 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24261 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24262 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24263 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24264 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24265 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24266 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24267 return 0;
24268
24269 /* Always leave these relocations for the linker. */
24270 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24271 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24272 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24273 return 1;
24274
24275 /* Always generate relocations against function symbols. */
24276 if (fixp->fx_r_type == BFD_RELOC_32
24277 && fixp->fx_addsy
24278 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24279 return 1;
24280
24281 return generic_force_reloc (fixp);
24282 }
24283
24284 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24285 /* Relocations against function names must be left unadjusted,
24286 so that the linker can use this information to generate interworking
24287 stubs. The MIPS version of this function
24288 also prevents relocations that are mips-16 specific, but I do not
24289 know why it does this.
24290
24291 FIXME:
24292 There is one other problem that ought to be addressed here, but
24293 which currently is not: Taking the address of a label (rather
24294 than a function) and then later jumping to that address. Such
24295 addresses also ought to have their bottom bit set (assuming that
24296 they reside in Thumb code), but at the moment they will not. */
24297
24298 bfd_boolean
24299 arm_fix_adjustable (fixS * fixP)
24300 {
24301 if (fixP->fx_addsy == NULL)
24302 return 1;
24303
24304 /* Preserve relocations against symbols with function type. */
24305 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24306 return FALSE;
24307
24308 if (THUMB_IS_FUNC (fixP->fx_addsy)
24309 && fixP->fx_subsy == NULL)
24310 return FALSE;
24311
24312 /* We need the symbol name for the VTABLE entries. */
24313 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24314 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24315 return FALSE;
24316
24317 /* Don't allow symbols to be discarded on GOT related relocs. */
24318 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24319 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24320 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24321 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24322 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24323 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24324 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24325 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24326 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24327 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24328 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24329 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24330 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24331 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24332 return FALSE;
24333
24334 /* Similarly for group relocations. */
24335 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24336 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24337 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24338 return FALSE;
24339
24340 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24341 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24342 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24343 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24344 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24345 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24346 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24347 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24348 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24349 return FALSE;
24350
24351 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24352 offsets, so keep these symbols. */
24353 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24354 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24355 return FALSE;
24356
24357 return TRUE;
24358 }
24359 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24360
24361 #ifdef OBJ_ELF
24362 const char *
24363 elf32_arm_target_format (void)
24364 {
24365 #ifdef TE_SYMBIAN
24366 return (target_big_endian
24367 ? "elf32-bigarm-symbian"
24368 : "elf32-littlearm-symbian");
24369 #elif defined (TE_VXWORKS)
24370 return (target_big_endian
24371 ? "elf32-bigarm-vxworks"
24372 : "elf32-littlearm-vxworks");
24373 #elif defined (TE_NACL)
24374 return (target_big_endian
24375 ? "elf32-bigarm-nacl"
24376 : "elf32-littlearm-nacl");
24377 #else
24378 if (target_big_endian)
24379 return "elf32-bigarm";
24380 else
24381 return "elf32-littlearm";
24382 #endif
24383 }
24384
/* Symbol frobbing hook for ELF targets: there is no ARM-specific
   processing here, so simply defer to the generic ELF code.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24391 #endif
24392
24393 /* MD interface: Finalization. */
24394
24395 void
24396 arm_cleanup (void)
24397 {
24398 literal_pool * pool;
24399
24400 /* Ensure that all the IT blocks are properly closed. */
24401 check_it_blocks_finished ();
24402
24403 for (pool = list_of_pools; pool; pool = pool->next)
24404 {
24405 /* Put it at the end of the relevant section. */
24406 subseg_set (pool->section, pool->sub_section);
24407 #ifdef OBJ_ELF
24408 arm_elf_change_section ();
24409 #endif
24410 s_ltorg (0);
24411 }
24412 }
24413
24414 #ifdef OBJ_ELF
24415 /* Remove any excess mapping symbols generated for alignment frags in
24416 SEC. We may have created a mapping symbol before a zero byte
24417 alignment; remove it if there's a mapping symbol after the
24418 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; walk forward over empty
	 frags to decide whether a later mapping symbol makes SYM
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24479 #endif
24480
24481 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24482 ARM ones. */
24483
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: encode Thumb-ness in each symbol's storage class so that
     downstream consumers can tell Thumb entries from ARM ones.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  /* Non-function Thumb symbols: map the plain storage class to
	     its Thumb-flavoured counterpart.  */
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* NOTE(review): 0xFF in n_flags appears to be the COFF marker for
	 interworking-capable symbols -- confirm against the COFF/ARM
	 header definitions.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  /* ELF: for each Thumb symbol that is not one of the special mapping
     symbols, either mark it as a Thumb branch target (.thumb_func) or,
     on pre-VER4 EABI objects, tag it as a 16-bit symbol via st_info.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24562
24563 /* MD interface: Initialization. */
24564
24565 static void
24566 set_constant_flonums (void)
24567 {
24568 int i;
24569
24570 for (i = 0; i < NUM_FLOAT_VALS; i++)
24571 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24572 abort ();
24573 }
24574
24575 /* Auto-select Thumb mode if it's the only available instruction set for the
24576 given architecture. */
24577
24578 static void
24579 autoselect_thumb_from_cpu_variant (void)
24580 {
24581 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24582 opcode_select (16);
24583 }
24584
24585 void
24586 md_begin (void)
24587 {
24588 unsigned mach;
24589 unsigned int i;
24590
24591 if ( (arm_ops_hsh = hash_new ()) == NULL
24592 || (arm_cond_hsh = hash_new ()) == NULL
24593 || (arm_shift_hsh = hash_new ()) == NULL
24594 || (arm_psr_hsh = hash_new ()) == NULL
24595 || (arm_v7m_psr_hsh = hash_new ()) == NULL
24596 || (arm_reg_hsh = hash_new ()) == NULL
24597 || (arm_reloc_hsh = hash_new ()) == NULL
24598 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
24599 as_fatal (_("virtual memory exhausted"));
24600
24601 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
24602 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
24603 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
24604 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
24605 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
24606 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
24607 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
24608 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
24609 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
24610 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
24611 (void *) (v7m_psrs + i));
24612 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
24613 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
24614 for (i = 0;
24615 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
24616 i++)
24617 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
24618 (void *) (barrier_opt_names + i));
24619 #ifdef OBJ_ELF
24620 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
24621 {
24622 struct reloc_entry * entry = reloc_names + i;
24623
24624 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
24625 /* This makes encode_branch() use the EABI versions of this relocation. */
24626 entry->reloc = BFD_RELOC_UNUSED;
24627
24628 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
24629 }
24630 #endif
24631
24632 set_constant_flonums ();
24633
24634 /* Set the cpu variant based on the command-line options. We prefer
24635 -mcpu= over -march= if both are set (as for GCC); and we prefer
24636 -mfpu= over any other way of setting the floating point unit.
24637 Use of legacy options with new options are faulted. */
24638 if (legacy_cpu)
24639 {
24640 if (mcpu_cpu_opt || march_cpu_opt)
24641 as_bad (_("use of old and new-style options to set CPU type"));
24642
24643 mcpu_cpu_opt = legacy_cpu;
24644 }
24645 else if (!mcpu_cpu_opt)
24646 mcpu_cpu_opt = march_cpu_opt;
24647
24648 if (legacy_fpu)
24649 {
24650 if (mfpu_opt)
24651 as_bad (_("use of old and new-style options to set FPU type"));
24652
24653 mfpu_opt = legacy_fpu;
24654 }
24655 else if (!mfpu_opt)
24656 {
24657 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24658 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24659 /* Some environments specify a default FPU. If they don't, infer it
24660 from the processor. */
24661 if (mcpu_fpu_opt)
24662 mfpu_opt = mcpu_fpu_opt;
24663 else
24664 mfpu_opt = march_fpu_opt;
24665 #else
24666 mfpu_opt = &fpu_default;
24667 #endif
24668 }
24669
24670 if (!mfpu_opt)
24671 {
24672 if (mcpu_cpu_opt != NULL)
24673 mfpu_opt = &fpu_default;
24674 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
24675 mfpu_opt = &fpu_arch_vfp_v2;
24676 else
24677 mfpu_opt = &fpu_arch_fpa;
24678 }
24679
24680 #ifdef CPU_DEFAULT
24681 if (!mcpu_cpu_opt)
24682 {
24683 mcpu_cpu_opt = &cpu_default;
24684 selected_cpu = cpu_default;
24685 }
24686 else if (no_cpu_selected ())
24687 selected_cpu = cpu_default;
24688 #else
24689 if (mcpu_cpu_opt)
24690 selected_cpu = *mcpu_cpu_opt;
24691 else
24692 mcpu_cpu_opt = &arm_arch_any;
24693 #endif
24694
24695 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24696
24697 autoselect_thumb_from_cpu_variant ();
24698
24699 arm_arch_used = thumb_arch_used = arm_arch_none;
24700
24701 #if defined OBJ_COFF || defined OBJ_ELF
24702 {
24703 unsigned int flags = 0;
24704
24705 #if defined OBJ_ELF
24706 flags = meabi_flags;
24707
24708 switch (meabi_flags)
24709 {
24710 case EF_ARM_EABI_UNKNOWN:
24711 #endif
24712 /* Set the flags in the private structure. */
24713 if (uses_apcs_26) flags |= F_APCS26;
24714 if (support_interwork) flags |= F_INTERWORK;
24715 if (uses_apcs_float) flags |= F_APCS_FLOAT;
24716 if (pic_code) flags |= F_PIC;
24717 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
24718 flags |= F_SOFT_FLOAT;
24719
24720 switch (mfloat_abi_opt)
24721 {
24722 case ARM_FLOAT_ABI_SOFT:
24723 case ARM_FLOAT_ABI_SOFTFP:
24724 flags |= F_SOFT_FLOAT;
24725 break;
24726
24727 case ARM_FLOAT_ABI_HARD:
24728 if (flags & F_SOFT_FLOAT)
24729 as_bad (_("hard-float conflicts with specified fpu"));
24730 break;
24731 }
24732
24733 /* Using pure-endian doubles (even if soft-float). */
24734 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
24735 flags |= F_VFP_FLOAT;
24736
24737 #if defined OBJ_ELF
24738 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
24739 flags |= EF_ARM_MAVERICK_FLOAT;
24740 break;
24741
24742 case EF_ARM_EABI_VER4:
24743 case EF_ARM_EABI_VER5:
24744 /* No additional flags to set. */
24745 break;
24746
24747 default:
24748 abort ();
24749 }
24750 #endif
24751 bfd_set_private_flags (stdoutput, flags);
24752
24753 /* We have run out flags in the COFF header to encode the
24754 status of ATPCS support, so instead we create a dummy,
24755 empty, debug section called .arm.atpcs. */
24756 if (atpcs)
24757 {
24758 asection * sec;
24759
24760 sec = bfd_make_section (stdoutput, ".arm.atpcs");
24761
24762 if (sec != NULL)
24763 {
24764 bfd_set_section_flags
24765 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
24766 bfd_set_section_size (stdoutput, sec, 0);
24767 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
24768 }
24769 }
24770 }
24771 #endif
24772
24773 /* Record the CPU type as well. */
24774 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
24775 mach = bfd_mach_arm_iWMMXt2;
24776 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
24777 mach = bfd_mach_arm_iWMMXt;
24778 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
24779 mach = bfd_mach_arm_XScale;
24780 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
24781 mach = bfd_mach_arm_ep9312;
24782 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
24783 mach = bfd_mach_arm_5TE;
24784 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
24785 {
24786 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24787 mach = bfd_mach_arm_5T;
24788 else
24789 mach = bfd_mach_arm_5;
24790 }
24791 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
24792 {
24793 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24794 mach = bfd_mach_arm_4T;
24795 else
24796 mach = bfd_mach_arm_4;
24797 }
24798 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
24799 mach = bfd_mach_arm_3M;
24800 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
24801 mach = bfd_mach_arm_3;
24802 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
24803 mach = bfd_mach_arm_2a;
24804 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
24805 mach = bfd_mach_arm_2;
24806 else
24807 mach = bfd_mach_arm_unknown;
24808
24809 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
24810 }
24811
24812 /* Command line processing. */
24813
24814 /* md_parse_option
24815 Invocation line includes a switch not recognized by the base assembler.
24816 See if it's a processor-specific option.
24817
24818 This routine is somewhat complicated by the need for backwards
24819 compatibility (since older releases of gcc can't be changed).
24820 The new options try to make the interface as compatible as
24821 possible with GCC.
24822
24823 New options (supported) are:
24824
24825 -mcpu=<cpu name> Assemble for selected processor
24826 -march=<architecture name> Assemble for selected architecture
24827 -mfpu=<fpu architecture> Assemble for selected FPU.
24828 -EB/-mbig-endian Big-endian
24829 -EL/-mlittle-endian Little-endian
24830 -k Generate PIC code
24831 -mthumb Start in Thumb mode
24832 -mthumb-interwork Code supports ARM/Thumb interworking
24833
24834 -m[no-]warn-deprecated Warn about deprecated features
24835 -m[no-]warn-syms Warn when symbols match instructions
24836
24837 For now we will also provide support for:
24838
24839 -mapcs-32 32-bit Program counter
24840 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
24842 -mapcs-reentrant Reentrant code
24843 -matpcs
24844 (sometime these will probably be replaced with -mapcs=<list of options>
24845 and -matpcs=<list of options>)
24846
   The remaining options are only supported for backwards compatibility.
24848 Cpu variants, the arm part is optional:
24849 -m[arm]1 Currently not supported.
24850 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24851 -m[arm]3 Arm 3 processor
24852 -m[arm]6[xx], Arm 6 processors
24853 -m[arm]7[xx][t][[d]m] Arm 7 processors
24854 -m[arm]8[10] Arm 8 processors
24855 -m[arm]9[20][tdmi] Arm 9 processors
24856 -mstrongarm[110[0]] StrongARM processors
24857 -mxscale XScale processors
24858 -m[arm]v[2345[t[e]]] Arm architectures
24859 -mall All (except the ARM1)
24860 FP variants:
24861 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24862 -mfpe-old (No float load/store multiples)
24863 -mvfpxd VFP Single precision
24864 -mvfp All VFP
24865 -mno-fpu Disable all floating point instructions
24866
24867 The following CPU names are recognized:
24868 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24869 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24870 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24871 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24872 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24873 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24874 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24875
24876 */
24877
/* "m" takes an argument (-m<option>); "k" (generate PIC code) is a
   bare flag.  */
const char * md_shortopts = "m:k";

/* Long-option ids.  An endianness option is defined only when the
   target can actually be assembled in that byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
24906
/* Describes a simple command-line option: when OPTION is matched the
   integer pointed to by VAR is set to VALUE.  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
24915
24916 struct arm_option_table arm_opts[] =
24917 {
24918 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24919 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24920 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24921 &support_interwork, 1, NULL},
24922 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24923 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24924 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24925 1, NULL},
24926 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24927 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24928 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24929 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24930 NULL},
24931
24932 /* These are recognized by the assembler, but have no affect on code. */
24933 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24934 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24935
24936 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24937 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24938 &warn_on_deprecated, 0, NULL},
24939 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24940 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24941 {NULL, NULL, NULL, 0, NULL}
24942 };
24943
/* Describes a legacy option: matching OPTION stores a pointer to the
   fixed feature set VALUE in *VAR, and the DEPRECATED message names
   the modern replacement.  */
struct arm_legacy_option_table
{
  const char *option;			/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set value;		/* What to change it to.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
24951
/* Translation table from legacy -m<cpu>/-m<arch>/-m<fpu> spellings to
   modern feature sets; each entry carries a deprecation message naming
   the replacement option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  /* Terminating sentinel.  */
  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25064
/* Describes one -mcpu= argument: the feature set it enables and the
   FPU assumed when the user gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  const char *name;		/* CPU name to match.  */
  size_t name_len;		/* Precomputed strlen of NAME.  */
  const arm_feature_set value;	/* Features the CPU provides.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25077
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Helper to fill in NAME_LEN from the literal at compile time.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "APM X-Gene 2"),

  /* Terminating sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25245
/* An entry in the -march= option table: maps an architecture name onto
   the feature bits it enables and the FPU architecture assumed by
   default when that architecture is selected.  */
struct arm_arch_option_table
{
  const char *name;			/* Architecture name, e.g. "armv7-a".  */
  size_t name_len;			/* Length of NAME, precomputed for matching.  */
  const arm_feature_set value;		/* Feature bits enabled by this architecture.  */
  const arm_feature_set default_fpu;	/* Default FPU feature set.  */
};
25253
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  NAME_LEN is computed at compile time from the
   string literal.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
#undef ARM_ARCH_OPT
25315
/* ISA extensions in the co-processor and main instruction set space.
   An entry in the "+extension" suffix table used by -mcpu=/-march=:
   MERGE_VALUE is ORed in when the extension is enabled, CLEAR_VALUE is
   cleared when it is disabled (via a "no" prefix), and ALLOWED_ARCHS
   restricts which base architectures the extension may modify.  */
struct arm_option_extension_value_table
{
  const char *name;			/* Extension name, e.g. "crypto".  */
  size_t name_len;			/* Length of NAME, precomputed.  */
  const arm_feature_set merge_value;	/* Bits added by "+<name>".  */
  const arm_feature_set clear_value;	/* Bits removed by "+no<name>".  */
  const arm_feature_set allowed_archs;	/* Architectures this applies to.  */
};
25325
/* The following table must be in alphabetical order with a NULL last entry:
   arm_parse_extension relies on the ordering to enforce that extensions are
   specified alphabetically on the command line.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			 ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
  ARM_EXT_OPT ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
#undef ARM_EXT_OPT
25377
/* ISA floating-point and Advanced SIMD extensions.
   An entry in the -mfpu= option table: maps an FPU name onto the
   co-processor feature bits it enables.  */
struct arm_option_fpu_value_table
{
  const char *name;		/* FPU name, e.g. "vfpv3-d16".  */
  const arm_feature_set value;	/* Feature bits for this FPU.  */
};
25384
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matching is exact (streq), so no name lengths
   are stored.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}	/* Sentinel.  */
};
25434
/* A generic name -> integer-value mapping, used for option tables whose
   value is a plain constant (float ABI, EABI version).  */
struct arm_option_value_table
{
  const char *name;	/* Option value name.  */
  long value;		/* Corresponding constant.  */
};
25440
/* Values accepted by the -mfloat-abi= option.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}	/* Sentinel.  */
};
25448
25449 #ifdef OBJ_ELF
25450 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Values accepted by the -meabi= option.  We only know how to output
   GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}	/* Sentinel.  */
};
25458 #endif
25459
/* An entry in the table of long options that take an argument,
   e.g. "-mcpu=<name>".  Matching is done on the OPTION prefix and the
   remainder of the argument is handed to FUNC.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (char * subopt);		/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25467
/* Parse STR, a string of "+extension" / "+noextension" suffixes, and
   apply them to the feature set pointed to by *OPT_P.  *OPT_P is
   replaced with a freshly allocated, modified copy (the allocation is
   deliberately never freed: it lives for the duration of the assembly).
   Returns TRUE on success; on failure a diagnostic is issued and FALSE
   is returned.  */
static bfd_boolean
arm_parse_extension (char *str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = (arm_feature_set *)
    xmalloc (sizeof (arm_feature_set));

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      char *ext;
      size_t len;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this extension name: up to the next '+'
	 or, for the last extension, the end of the string.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix requests removal of the extension.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the alphabetical scan for the removal phase.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.
	 Note: OPT resumes from where the previous iteration left off,
	 which is what enforces the alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    /* Check we can apply the extension to this architecture.  */
	    if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
25590
25591 static bfd_boolean
25592 arm_parse_cpu (char *str)
25593 {
25594 const struct arm_cpu_option_table *opt;
25595 char *ext = strchr (str, '+');
25596 size_t len;
25597
25598 if (ext != NULL)
25599 len = ext - str;
25600 else
25601 len = strlen (str);
25602
25603 if (len == 0)
25604 {
25605 as_bad (_("missing cpu name `%s'"), str);
25606 return FALSE;
25607 }
25608
25609 for (opt = arm_cpus; opt->name != NULL; opt++)
25610 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25611 {
25612 mcpu_cpu_opt = &opt->value;
25613 mcpu_fpu_opt = &opt->default_fpu;
25614 if (opt->canonical_name)
25615 {
25616 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25617 strcpy (selected_cpu_name, opt->canonical_name);
25618 }
25619 else
25620 {
25621 size_t i;
25622
25623 if (len >= sizeof selected_cpu_name)
25624 len = (sizeof selected_cpu_name) - 1;
25625
25626 for (i = 0; i < len; i++)
25627 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25628 selected_cpu_name[i] = 0;
25629 }
25630
25631 if (ext != NULL)
25632 return arm_parse_extension (ext, &mcpu_cpu_opt);
25633
25634 return TRUE;
25635 }
25636
25637 as_bad (_("unknown cpu `%s'"), str);
25638 return FALSE;
25639 }
25640
25641 static bfd_boolean
25642 arm_parse_arch (char *str)
25643 {
25644 const struct arm_arch_option_table *opt;
25645 char *ext = strchr (str, '+');
25646 size_t len;
25647
25648 if (ext != NULL)
25649 len = ext - str;
25650 else
25651 len = strlen (str);
25652
25653 if (len == 0)
25654 {
25655 as_bad (_("missing architecture name `%s'"), str);
25656 return FALSE;
25657 }
25658
25659 for (opt = arm_archs; opt->name != NULL; opt++)
25660 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25661 {
25662 march_cpu_opt = &opt->value;
25663 march_fpu_opt = &opt->default_fpu;
25664 strcpy (selected_cpu_name, opt->name);
25665
25666 if (ext != NULL)
25667 return arm_parse_extension (ext, &march_cpu_opt);
25668
25669 return TRUE;
25670 }
25671
25672 as_bad (_("unknown architecture `%s'\n"), str);
25673 return FALSE;
25674 }
25675
25676 static bfd_boolean
25677 arm_parse_fpu (char * str)
25678 {
25679 const struct arm_option_fpu_value_table * opt;
25680
25681 for (opt = arm_fpus; opt->name != NULL; opt++)
25682 if (streq (opt->name, str))
25683 {
25684 mfpu_opt = &opt->value;
25685 return TRUE;
25686 }
25687
25688 as_bad (_("unknown floating point format `%s'\n"), str);
25689 return FALSE;
25690 }
25691
25692 static bfd_boolean
25693 arm_parse_float_abi (char * str)
25694 {
25695 const struct arm_option_value_table * opt;
25696
25697 for (opt = arm_float_abis; opt->name != NULL; opt++)
25698 if (streq (opt->name, str))
25699 {
25700 mfloat_abi_opt = opt->value;
25701 return TRUE;
25702 }
25703
25704 as_bad (_("unknown floating point abi `%s'\n"), str);
25705 return FALSE;
25706 }
25707
25708 #ifdef OBJ_ELF
25709 static bfd_boolean
25710 arm_parse_eabi (char * str)
25711 {
25712 const struct arm_option_value_table *opt;
25713
25714 for (opt = arm_eabis; opt->name != NULL; opt++)
25715 if (streq (opt->name, str))
25716 {
25717 meabi_flags = opt->value;
25718 return TRUE;
25719 }
25720 as_bad (_("unknown EABI `%s'\n"), str);
25721 return FALSE;
25722 }
25723 #endif
25724
25725 static bfd_boolean
25726 arm_parse_it_mode (char * str)
25727 {
25728 bfd_boolean ret = TRUE;
25729
25730 if (streq ("arm", str))
25731 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25732 else if (streq ("thumb", str))
25733 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25734 else if (streq ("always", str))
25735 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25736 else if (streq ("never", str))
25737 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25738 else
25739 {
25740 as_bad (_("unknown implicit IT mode `%s', should be "\
25741 "arm, thumb, always, or never."), str);
25742 ret = FALSE;
25743 }
25744
25745 return ret;
25746 }
25747
/* Enable TI CodeComposer Studio syntax compatibility mode (the -mccs
   option): switch the comment character to ';' and disable the line
   separator character.  Always succeeds.  */
static bfd_boolean
arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
{
  codecomposer_syntax = TRUE;
  arm_comment_chars[0] = ';';
  arm_line_separator_chars[0] = 0;
  return TRUE;
}
25756
/* Long options taking an argument; matched by prefix in
   md_parse_option, with the remainder of the argument handed to the
   entry's parse function.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}	/* Sentinel.  */
};
25777
/* gas target hook: handle a command line option.  C is the option
   character (or OPTION_* value) and ARG is the option's argument, if
   any.  Returns non-zero if the option was consumed, 0 if it was not
   recognized.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* First try the simple flag-style options (-k, -mthumb, ...),
	 which store an integer value through a pointer.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the legacy -m<cpu>/-m<fpu> options, which store a pointer
	 to a feature set rather than an integer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Finally the long options (-mcpu=, -march=, ...) which match on
	 a prefix of ARG and have their own sub-option parsers.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25868
25869 void
25870 md_show_usage (FILE * fp)
25871 {
25872 struct arm_option_table *opt;
25873 struct arm_long_option_table *lopt;
25874
25875 fprintf (fp, _(" ARM-specific assembler options:\n"));
25876
25877 for (opt = arm_opts; opt->option != NULL; opt++)
25878 if (opt->help != NULL)
25879 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
25880
25881 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25882 if (lopt->help != NULL)
25883 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
25884
25885 #ifdef OPTION_EB
25886 fprintf (fp, _("\
25887 -EB assemble code for a big-endian cpu\n"));
25888 #endif
25889
25890 #ifdef OPTION_EL
25891 fprintf (fp, _("\
25892 -EL assemble code for a little-endian cpu\n"));
25893 #endif
25894
25895 fprintf (fp, _("\
25896 --fix-v4bx Allow BX in ARMv4 code\n"));
25897 }
25898
25899
25900 #ifdef OBJ_ELF
/* Association between an EABI Tag_CPU_arch value (VAL) and the
   architectural feature set (FLAGS) that implies it.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
25906
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  The lookup
   in aeabi_set_public_attributes keeps the LAST entry that contributes a
   new feature bit, so ordering here is semantically significant.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {0, ARM_ARCH_NONE}	/* Sentinel.  */
};
25932
25933 /* Set an attribute if it has not already been set by the user. */
25934 static void
25935 aeabi_set_attribute_int (int tag, int value)
25936 {
25937 if (tag < 1
25938 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25939 || !attributes_set_explicitly[tag])
25940 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25941 }
25942
25943 static void
25944 aeabi_set_attribute_string (int tag, const char *value)
25945 {
25946 if (tag < 1
25947 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25948 || !attributes_set_explicitly[tag])
25949 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25950 }
25951
/* Set the public EABI object attributes (the .ARM.attributes section)
   from the union of the requested CPU/FPU capabilities and the
   instructions actually assembled.  Attributes already set explicitly
   by the user are left untouched (see aeabi_set_attribute_*).  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  tmp = flags;
  arch = 0;
  /* Walk CPU_ARCH_VER keeping the LAST entry that contributes a feature
     bit not seen so far; bits are cleared from TMP as they are matched.  */
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* If anything beyond the v8-M baseline is in use, report v8-M
     mainline instead.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names are reported upper-cased without the "arm"
	 prefix, e.g. "armv7-a" becomes "V7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      /* 3: v8-M style Thumb; 2: Thumb-2; 1: original 16-bit Thumb.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26175
26176 /* Add the default contents for the .ARM.attributes section. */
26177 void
26178 arm_md_end (void)
26179 {
26180 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26181 return;
26182
26183 aeabi_set_public_attributes ();
26184 }
26185 #endif /* OBJ_ELF */
26186
26187
26188 /* Parse a .cpu directive. */
26189
26190 static void
26191 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26192 {
26193 const struct arm_cpu_option_table *opt;
26194 char *name;
26195 char saved_char;
26196
26197 name = input_line_pointer;
26198 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26199 input_line_pointer++;
26200 saved_char = *input_line_pointer;
26201 *input_line_pointer = 0;
26202
26203 /* Skip the first "all" entry. */
26204 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26205 if (streq (opt->name, name))
26206 {
26207 mcpu_cpu_opt = &opt->value;
26208 selected_cpu = opt->value;
26209 if (opt->canonical_name)
26210 strcpy (selected_cpu_name, opt->canonical_name);
26211 else
26212 {
26213 int i;
26214 for (i = 0; opt->name[i]; i++)
26215 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26216
26217 selected_cpu_name[i] = 0;
26218 }
26219 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26220 *input_line_pointer = saved_char;
26221 demand_empty_rest_of_line ();
26222 return;
26223 }
26224 as_bad (_("unknown cpu `%s'"), name);
26225 *input_line_pointer = saved_char;
26226 ignore_rest_of_line ();
26227 }
26228
26229
26230 /* Parse a .arch directive. */
26231
26232 static void
26233 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26234 {
26235 const struct arm_arch_option_table *opt;
26236 char saved_char;
26237 char *name;
26238
26239 name = input_line_pointer;
26240 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26241 input_line_pointer++;
26242 saved_char = *input_line_pointer;
26243 *input_line_pointer = 0;
26244
26245 /* Skip the first "all" entry. */
26246 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26247 if (streq (opt->name, name))
26248 {
26249 mcpu_cpu_opt = &opt->value;
26250 selected_cpu = opt->value;
26251 strcpy (selected_cpu_name, opt->name);
26252 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26253 *input_line_pointer = saved_char;
26254 demand_empty_rest_of_line ();
26255 return;
26256 }
26257
26258 as_bad (_("unknown architecture `%s'\n"), name);
26259 *input_line_pointer = saved_char;
26260 ignore_rest_of_line ();
26261 }
26262
26263
26264 /* Parse a .object_arch directive. */
26265
26266 static void
26267 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26268 {
26269 const struct arm_arch_option_table *opt;
26270 char saved_char;
26271 char *name;
26272
26273 name = input_line_pointer;
26274 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26275 input_line_pointer++;
26276 saved_char = *input_line_pointer;
26277 *input_line_pointer = 0;
26278
26279 /* Skip the first "all" entry. */
26280 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26281 if (streq (opt->name, name))
26282 {
26283 object_arch = &opt->value;
26284 *input_line_pointer = saved_char;
26285 demand_empty_rest_of_line ();
26286 return;
26287 }
26288
26289 as_bad (_("unknown architecture `%s'\n"), name);
26290 *input_line_pointer = saved_char;
26291 ignore_rest_of_line ();
26292 }
26293
26294 /* Parse a .arch_extension directive. */
26295
26296 static void
26297 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26298 {
26299 const struct arm_option_extension_value_table *opt;
26300 char saved_char;
26301 char *name;
26302 int adding_value = 1;
26303
26304 name = input_line_pointer;
26305 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26306 input_line_pointer++;
26307 saved_char = *input_line_pointer;
26308 *input_line_pointer = 0;
26309
26310 if (strlen (name) >= 2
26311 && strncmp (name, "no", 2) == 0)
26312 {
26313 adding_value = 0;
26314 name += 2;
26315 }
26316
26317 for (opt = arm_extensions; opt->name != NULL; opt++)
26318 if (streq (opt->name, name))
26319 {
26320 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
26321 {
26322 as_bad (_("architectural extension `%s' is not allowed for the "
26323 "current base architecture"), name);
26324 break;
26325 }
26326
26327 if (adding_value)
26328 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26329 opt->merge_value);
26330 else
26331 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26332
26333 mcpu_cpu_opt = &selected_cpu;
26334 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26335 *input_line_pointer = saved_char;
26336 demand_empty_rest_of_line ();
26337 return;
26338 }
26339
26340 if (opt->name == NULL)
26341 as_bad (_("unknown architecture extension `%s'\n"), name);
26342
26343 *input_line_pointer = saved_char;
26344 ignore_rest_of_line ();
26345 }
26346
26347 /* Parse a .fpu directive. */
26348
26349 static void
26350 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26351 {
26352 const struct arm_option_fpu_value_table *opt;
26353 char saved_char;
26354 char *name;
26355
26356 name = input_line_pointer;
26357 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26358 input_line_pointer++;
26359 saved_char = *input_line_pointer;
26360 *input_line_pointer = 0;
26361
26362 for (opt = arm_fpus; opt->name != NULL; opt++)
26363 if (streq (opt->name, name))
26364 {
26365 mfpu_opt = &opt->value;
26366 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26367 *input_line_pointer = saved_char;
26368 demand_empty_rest_of_line ();
26369 return;
26370 }
26371
26372 as_bad (_("unknown floating point format `%s'\n"), name);
26373 *input_line_pointer = saved_char;
26374 ignore_rest_of_line ();
26375 }
26376
/* Copy symbol information.

   Propagates the ARM target-specific symbol flag (accessed through
   ARM_GET_FLAG) from SRC to DEST — presumably invoked when one symbol
   is made equivalent to another (e.g. via .set); confirm against the
   tc_copy_symbol_attributes hook's callers.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26384
26385 #ifdef OBJ_ELF
26386 /* Given a symbolic attribute NAME, return the proper integer value.
26387 Returns -1 if the attribute is not known. */
26388
26389 int
26390 arm_convert_symbolic_attribute (const char *name)
26391 {
26392 static const struct
26393 {
26394 const char * name;
26395 const int tag;
26396 }
26397 attribute_table[] =
26398 {
26399 /* When you modify this table you should
26400 also modify the list in doc/c-arm.texi. */
26401 #define T(tag) {#tag, tag}
26402 T (Tag_CPU_raw_name),
26403 T (Tag_CPU_name),
26404 T (Tag_CPU_arch),
26405 T (Tag_CPU_arch_profile),
26406 T (Tag_ARM_ISA_use),
26407 T (Tag_THUMB_ISA_use),
26408 T (Tag_FP_arch),
26409 T (Tag_VFP_arch),
26410 T (Tag_WMMX_arch),
26411 T (Tag_Advanced_SIMD_arch),
26412 T (Tag_PCS_config),
26413 T (Tag_ABI_PCS_R9_use),
26414 T (Tag_ABI_PCS_RW_data),
26415 T (Tag_ABI_PCS_RO_data),
26416 T (Tag_ABI_PCS_GOT_use),
26417 T (Tag_ABI_PCS_wchar_t),
26418 T (Tag_ABI_FP_rounding),
26419 T (Tag_ABI_FP_denormal),
26420 T (Tag_ABI_FP_exceptions),
26421 T (Tag_ABI_FP_user_exceptions),
26422 T (Tag_ABI_FP_number_model),
26423 T (Tag_ABI_align_needed),
26424 T (Tag_ABI_align8_needed),
26425 T (Tag_ABI_align_preserved),
26426 T (Tag_ABI_align8_preserved),
26427 T (Tag_ABI_enum_size),
26428 T (Tag_ABI_HardFP_use),
26429 T (Tag_ABI_VFP_args),
26430 T (Tag_ABI_WMMX_args),
26431 T (Tag_ABI_optimization_goals),
26432 T (Tag_ABI_FP_optimization_goals),
26433 T (Tag_compatibility),
26434 T (Tag_CPU_unaligned_access),
26435 T (Tag_FP_HP_extension),
26436 T (Tag_VFP_HP_extension),
26437 T (Tag_ABI_FP_16bit_format),
26438 T (Tag_MPextension_use),
26439 T (Tag_DIV_use),
26440 T (Tag_nodefaults),
26441 T (Tag_also_compatible_with),
26442 T (Tag_conformance),
26443 T (Tag_T2EE_use),
26444 T (Tag_Virtualization_use),
26445 /* We deliberately do not include Tag_MPextension_use_legacy. */
26446 #undef T
26447 };
26448 unsigned int i;
26449
26450 if (name == NULL)
26451 return -1;
26452
26453 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
26454 if (streq (name, attribute_table[i].name))
26455 return attribute_table[i].tag;
26456
26457 return -1;
26458 }
26459
26460
26461 /* Apply sym value for relocations only in the case that they are for
26462 local symbols in the same segment as the fixup and you have the
26463 respective architectural feature for blx and simple switches. */
26464 int
26465 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26466 {
26467 if (fixP->fx_addsy
26468 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26469 /* PR 17444: If the local symbol is in a different section then a reloc
26470 will always be generated for it, so applying the symbol value now
26471 will result in a double offset being stored in the relocation. */
26472 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26473 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26474 {
26475 switch (fixP->fx_r_type)
26476 {
26477 case BFD_RELOC_ARM_PCREL_BLX:
26478 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26479 if (ARM_IS_FUNC (fixP->fx_addsy))
26480 return 1;
26481 break;
26482
26483 case BFD_RELOC_ARM_PCREL_CALL:
26484 case BFD_RELOC_THUMB_PCREL_BLX:
26485 if (THUMB_IS_FUNC (fixP->fx_addsy))
26486 return 1;
26487 break;
26488
26489 default:
26490 break;
26491 }
26492
26493 }
26494 return 0;
26495 }
26496 #endif /* OBJ_ELF */
This page took 1.078669 seconds and 5 git commands to generate.