[gas][arm] Enable VLDM, VSTM, VPUSH, VPOP for MVE
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35 #include "cpu-arm.h"
36
37 #ifdef OBJ_ELF
38 #include "elf/arm.h"
39 #include "dw2gencfi.h"
40 #endif
41
42 #include "dwarf2dbg.h"
43
44 #ifdef OBJ_ELF
45 /* Must be at least the size of the largest unwind opcode (currently two). */
46 #define ARM_OPCODE_CHUNK_SIZE 8
47
48 /* This structure holds the unwinding state. */
49
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol marking the start of the function being unwound — presumably
     set by the .fnstart unwinding directive; confirm against the directive
     handlers.  */
  symbolS *	proc_start;
  /* Symbol for this function's entry in the unwind index table.  */
  symbolS *	table_entry;
  /* Personality routine symbol, when one is given explicitly.  */
  symbolS *	personality_routine;
  /* Index of a pre-defined EABI personality routine, or a sentinel when
     personality_routine is used instead.  */
  int		personality_index;
  /* The segment containing the function.  */
  segT		saved_seg;
  subsegT	saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes emitted so far.  */
  int		opcode_count;
  /* Allocated size of the opcodes buffer.  */
  int		opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT	fp_offset;
  int		fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	sp_restored:1;
} unwind;
78
79 /* Whether --fdpic was given. */
80 static int arm_fdpic;
81
82 #endif /* OBJ_ELF */
83
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parsing failed and no alternative interpretation of the operand
     should be attempted.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
92
/* Floating point ABI variants, mirroring the -mfloat-abi option.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,	/* FP hardware; FP registers used for arguments.  */
  ARM_FLOAT_ABI_SOFTFP,	/* FP hardware; soft-float calling convention.  */
  ARM_FLOAT_ABI_SOFT	/* No FP hardware assumed.  */
};
99
100 /* Types of processor to assemble for. */
101 #ifndef CPU_DEFAULT
102 /* The code that was here used to select a default CPU depending on compiler
103 pre-defines which were only present when doing native builds, thus
104 changing gas' default behaviour depending upon the build host.
105
106 If you have a target that requires a default CPU option then the you
107 should define CPU_DEFAULT here. */
108 #endif
109
110 /* Perform range checks on positive and negative overflows by checking if the
111 VALUE given fits within the range of an BITS sized immediate. */
112 static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
113 {
114 gas_assert (bits < (offsetT)(sizeof (value) * 8));
115 return (value & ~((1 << bits)-1))
116 && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117 }
118
119 #ifndef FPU_DEFAULT
120 # ifdef TE_LINUX
121 # define FPU_DEFAULT FPU_ARCH_FPA
122 # elif defined (TE_NetBSD)
123 # ifdef OBJ_ELF
124 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
125 # else
126 /* Legacy a.out format. */
127 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
128 # endif
129 # elif defined (TE_VXWORKS)
130 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
131 # else
132 /* For backwards compatibility, default to FPA. */
133 # define FPU_DEFAULT FPU_ARCH_FPA
134 # endif
135 #endif /* ifndef FPU_DEFAULT */
136
137 #define streq(a, b) (strcmp (a, b) == 0)
138
139 /* Current set of feature bits available (CPU+FPU). Different from
140 selected_cpu + selected_fpu in case of autodetection since the CPU
141 feature bits are then all set. */
142 static arm_feature_set cpu_variant;
143 /* Feature bits used in each execution state. Used to set build attribute
144 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
145 static arm_feature_set arm_arch_used;
146 static arm_feature_set thumb_arch_used;
147
148 /* Flags stored in private area of BFD structure. */
149 static int uses_apcs_26 = FALSE;
150 static int atpcs = FALSE;
151 static int support_interwork = FALSE;
152 static int uses_apcs_float = FALSE;
153 static int pic_code = FALSE;
154 static int fix_v4bx = FALSE;
155 /* Warn on using deprecated features. */
156 static int warn_on_deprecated = TRUE;
157
158 /* Understand CodeComposer Studio assembly syntax. */
159 bfd_boolean codecomposer_syntax = FALSE;
160
161 /* Variables that we set while parsing command-line options. Once all
162 options have been read we re-process these values to set the real
163 assembly flags. */
164
165 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
166 instead of -mcpu=arm1). */
167 static const arm_feature_set *legacy_cpu = NULL;
168 static const arm_feature_set *legacy_fpu = NULL;
169
170 /* CPU, extension and FPU feature bits selected by -mcpu. */
171 static const arm_feature_set *mcpu_cpu_opt = NULL;
172 static arm_feature_set *mcpu_ext_opt = NULL;
173 static const arm_feature_set *mcpu_fpu_opt = NULL;
174
175 /* CPU, extension and FPU feature bits selected by -march. */
176 static const arm_feature_set *march_cpu_opt = NULL;
177 static arm_feature_set *march_ext_opt = NULL;
178 static const arm_feature_set *march_fpu_opt = NULL;
179
180 /* Feature bits selected by -mfpu. */
181 static const arm_feature_set *mfpu_opt = NULL;
182
183 /* Constants for known architecture features. */
184 static const arm_feature_set fpu_default = FPU_DEFAULT;
185 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
186 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
187 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
188 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
189 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
190 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
191 #ifdef OBJ_ELF
192 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
193 #endif
194 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
195
196 #ifdef CPU_DEFAULT
197 static const arm_feature_set cpu_default = CPU_DEFAULT;
198 #endif
199
200 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
201 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
202 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
203 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
204 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
205 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
206 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
207 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
208 static const arm_feature_set arm_ext_v4t_5 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
210 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
211 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
212 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
213 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
214 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
215 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
216 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
217 /* Only for compatability of hint instructions. */
218 static const arm_feature_set arm_ext_v6k_v6t2 =
219 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
220 static const arm_feature_set arm_ext_v6_notm =
221 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
222 static const arm_feature_set arm_ext_v6_dsp =
223 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
224 static const arm_feature_set arm_ext_barrier =
225 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
226 static const arm_feature_set arm_ext_msr =
227 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
228 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
229 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
230 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
231 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
232 #ifdef OBJ_ELF
233 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
234 #endif
235 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
236 static const arm_feature_set arm_ext_m =
237 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
238 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
239 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
240 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
241 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
242 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
243 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
244 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
245 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
246 static const arm_feature_set arm_ext_v8m_main =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
248 static const arm_feature_set arm_ext_v8_1m_main =
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
250 /* Instructions in ARMv8-M only found in M profile architectures. */
251 static const arm_feature_set arm_ext_v8m_m_only =
252 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
253 static const arm_feature_set arm_ext_v6t2_v8m =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
255 /* Instructions shared between ARMv8-A and ARMv8-M. */
256 static const arm_feature_set arm_ext_atomics =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
258 #ifdef OBJ_ELF
259 /* DSP instructions Tag_DSP_extension refers to. */
260 static const arm_feature_set arm_ext_dsp =
261 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
262 #endif
263 static const arm_feature_set arm_ext_ras =
264 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
265 /* FP16 instructions. */
266 static const arm_feature_set arm_ext_fp16 =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
268 static const arm_feature_set arm_ext_fp16_fml =
269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
270 static const arm_feature_set arm_ext_v8_2 =
271 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
272 static const arm_feature_set arm_ext_v8_3 =
273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
274 static const arm_feature_set arm_ext_sb =
275 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
276 static const arm_feature_set arm_ext_predres =
277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
278 static const arm_feature_set arm_ext_bf16 =
279 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
280 static const arm_feature_set arm_ext_i8mm =
281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
282
283 static const arm_feature_set arm_arch_any = ARM_ANY;
284 static const arm_feature_set fpu_any = FPU_ANY;
285 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
286 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
287 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
288
289 static const arm_feature_set arm_cext_iwmmxt2 =
290 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
291 static const arm_feature_set arm_cext_iwmmxt =
292 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
293 static const arm_feature_set arm_cext_xscale =
294 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
295 static const arm_feature_set arm_cext_maverick =
296 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
297 static const arm_feature_set fpu_fpa_ext_v1 =
298 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
299 static const arm_feature_set fpu_fpa_ext_v2 =
300 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
301 static const arm_feature_set fpu_vfp_ext_v1xd =
302 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
303 static const arm_feature_set fpu_vfp_ext_v1 =
304 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
305 static const arm_feature_set fpu_vfp_ext_v2 =
306 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
307 static const arm_feature_set fpu_vfp_ext_v3xd =
308 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
309 static const arm_feature_set fpu_vfp_ext_v3 =
310 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
311 static const arm_feature_set fpu_vfp_ext_d32 =
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
313 static const arm_feature_set fpu_neon_ext_v1 =
314 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
315 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
316 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
317 static const arm_feature_set mve_ext =
318 ARM_FEATURE_COPROC (FPU_MVE);
319 static const arm_feature_set mve_fp_ext =
320 ARM_FEATURE_COPROC (FPU_MVE_FP);
321 #ifdef OBJ_ELF
322 static const arm_feature_set fpu_vfp_fp16 =
323 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
324 static const arm_feature_set fpu_neon_ext_fma =
325 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
326 #endif
327 static const arm_feature_set fpu_vfp_ext_fma =
328 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
329 static const arm_feature_set fpu_vfp_ext_armv8 =
330 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
331 static const arm_feature_set fpu_vfp_ext_armv8xd =
332 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
333 static const arm_feature_set fpu_neon_ext_armv8 =
334 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
335 static const arm_feature_set fpu_crypto_ext_armv8 =
336 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
337 static const arm_feature_set crc_ext_armv8 =
338 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
339 static const arm_feature_set fpu_neon_ext_v8_1 =
340 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
341 static const arm_feature_set fpu_neon_ext_dotprod =
342 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
343
344 static int mfloat_abi_opt = -1;
345 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
346 directive. */
347 static arm_feature_set selected_arch = ARM_ARCH_NONE;
348 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
349 directive. */
350 static arm_feature_set selected_ext = ARM_ARCH_NONE;
351 /* Feature bits selected by the last -mcpu/-march or by the combination of the
352 last .cpu/.arch directive .arch_extension directives since that
353 directive. */
354 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
355 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
356 static arm_feature_set selected_fpu = FPU_NONE;
357 /* Feature bits selected by the last .object_arch directive. */
358 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
359 /* Must be long enough to hold any of the names in arm_cpus. */
360 static const struct arm_ext_table * selected_ctx_ext_table = NULL;
361 static char selected_cpu_name[20];
362
363 extern FLONUM_TYPE generic_floating_point_number;
364
365 /* Return if no cpu was selected on command-line. */
366 static bfd_boolean
367 no_cpu_selected (void)
368 {
369 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
370 }
371
372 #ifdef OBJ_ELF
373 # ifdef EABI_DEFAULT
374 static int meabi_flags = EABI_DEFAULT;
375 # else
376 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
377 # endif
378
379 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
380
381 bfd_boolean
382 arm_is_eabi (void)
383 {
384 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
385 }
386 #endif
387
388 #ifdef OBJ_ELF
389 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
390 symbolS * GOT_symbol;
391 #endif
392
393 /* 0: assemble for ARM,
394 1: assemble for Thumb,
395 2: assemble for Thumb even though target CPU does not support thumb
396 instructions. */
397 static int thumb_mode = 0;
398 /* A value distinct from the possible values for thumb_mode that we
399 can use to record whether thumb_mode has been copied into the
400 tc_frag_data field of a frag. */
401 #define MODE_RECORDED (1 << 4)
402
/* Specifies the intrinsic IT insn behavior mode: in which assembly modes
   gas may synthesise an implicit IT block around a conditional Thumb
   instruction.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00, /* Never generate implicit IT.  */
  IMPLICIT_IT_MODE_ARM    = 0x01, /* Allowed when assembling ARM-style input.  */
  IMPLICIT_IT_MODE_THUMB  = 0x02, /* Allowed when assembling Thumb-style input.  */
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
411 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
412
413 /* If unified_syntax is true, we are processing the new unified
414 ARM/Thumb syntax. Important differences from the old ARM mode:
415
416 - Immediate operands do not require a # prefix.
417 - Conditional affixes always appear at the end of the
418 instruction. (For backward compatibility, those instructions
419 that formerly had them in the middle, continue to accept them
420 there.)
421 - The IT instruction may appear, and if it does is validated
422 against subsequent conditional affixes. It does not generate
423 machine code.
424
425 Important differences from the old Thumb mode:
426
427 - Immediate operands do not require a # prefix.
428 - Most of the V6T2 instructions are only available in unified mode.
429 - The .N and .W suffixes are recognized and honored (it is an error
430 if they cannot be honored).
431 - All instructions set the flags if and only if they have an 's' affix.
432 - Conditional affixes may be used. They are validated against
433 preceding IT instructions. Unlike ARM mode, you cannot use a
434 conditional affix except in the scope of an IT instruction. */
435
436 static bfd_boolean unified_syntax = FALSE;
437
438 /* An immediate operand can start with #, and ld*, st*, pld operands
439 can contain [ and ]. We need to tell APP not to elide whitespace
440 before a [, which can appear as the first operand for pld.
441 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
442 const char arm_symbol_chars[] = "#[]{}";
443
/* Element types that can appear in a Neon/MVE type suffix (e.g. the
   ".s32" in "vadd.s32").  */
enum neon_el_type
{
  NT_invtype,	/* Invalid / unrecognised type.  */
  NT_untyped,	/* ".8", ".16", ... — size only, no type letter.  */
  NT_integer,	/* ".i<size>".  */
  NT_float,	/* ".f<size>".  */
  NT_poly,	/* ".p<size>" (polynomial).  */
  NT_signed,	/* ".s<size>".  */
  NT_bfloat,	/* ".bf16".  */
  NT_unsigned	/* ".u<size>".  */
};
455
/* One parsed type-suffix element: its kind and its width in bits.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};
461
462 #define NEON_MAX_TYPE_ELS 4
463
/* The full sequence of type-suffix elements attached to one mnemonic,
   up to NEON_MAX_TYPE_ELS entries.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  /* Number of valid entries in EL.  */
  unsigned elems;
};
469
/* Classification of an instruction with respect to IT (Thumb) and
   VPT/VPST (MVE) predication blocks.  */
enum pred_instruction_type
{
  OUTSIDE_PRED_INSN,		/* Not inside any IT or VPT block.  */
  INSIDE_VPT_INSN,		/* Predicated within a VPT/VPST block.  */
  INSIDE_IT_INSN,		/* Conditional within an IT block.  */
  INSIDE_IT_LAST_INSN,		/* Must be the final insn of its IT block.  */
  IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
  NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
  IT_INSN,			/* The IT insn has been parsed.  */
  VPT_INSN,			/* The VPT/VPST insn has been parsed.  */
  MVE_OUTSIDE_PRED_INSN ,	/* Instruction to indicate a MVE instruction without
				   a predication code.  */
  MVE_UNPREDICABLE_INSN		/* MVE instruction that is non-predicable.  */
};
486
487 /* The maximum number of operands we need. */
488 #define ARM_IT_MAX_OPERANDS 6
489 #define ARM_IT_MAX_RELOCS 3
490
/* Working state for the instruction currently being assembled.  Filled in
   by the parser and consumed by the ARM/Thumb encoders via the global
   "inst" below.  */
struct arm_it
{
  /* Diagnostic to report for this instruction, or NULL if none.  */
  const char *	error;
  /* The binary encoding built up so far.  */
  unsigned long instruction;
  /* Size of the encoded instruction in bytes.  */
  int		size;
  /* Required encoding size in bytes (e.g. from a .n/.w width suffix),
     or 0 when either is acceptable — NOTE(review): inferred from the
     unified-syntax comment above; confirm against opcode_lookup.  */
  int		size_req;
  /* Condition code for this instruction.  */
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  /* Parsed Neon/MVE type suffixes (e.g. ".s32").  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long	relax;
  /* Relocations required by the instruction's operands.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Position of this instruction relative to IT/VPT blocks.  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 2;  /* .imm field is a second register.
				 0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar   : 2;  /* Operand is a (SIMD) scalar:
				 0) not scalar,
				 1) Neon scalar,
				 2) MVE scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned iszr	: 1;  /* Operand is ZR register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
549
550 static struct arm_it inst;
551
552 #define NUM_FLOAT_VALS 8
553
554 const char * fp_const[] =
555 {
556 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
557 };
558
559 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
560
561 #define FAIL (-1)
562 #define SUCCESS (0)
563
564 #define SUFF_S 1
565 #define SUFF_D 2
566 #define SUFF_E 3
567 #define SUFF_P 4
568
569 #define CP_T_X 0x00008000
570 #define CP_T_Y 0x00400000
571
572 #define CONDS_BIT 0x00100000
573 #define LOAD_BIT 0x00100000
574
575 #define DOUBLE_LOAD_FLAG 0x00000001
576
/* Mapping of a condition-code name (e.g. "eq") to its encoding value.  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};
582
583 #define COND_ALWAYS 0xE
584
/* Mapping of a PSR name/suffix (e.g. "cpsr_f") to its field encoding.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};
590
/* Mapping of a barrier option name (for DSB/DMB/ISB) to its encoding,
   together with the architecture that provides it.  */
struct asm_barrier_opt
{
  const char *		  template_name;
  unsigned long		  value;
  const arm_feature_set	  arch;
};
597
598 /* The bit that distinguishes CPSR and SPSR. */
599 #define SPSR_BIT (1 << 22)
600
601 /* The individual PSR flag bits. */
602 #define PSR_c (1 << 16)
603 #define PSR_x (1 << 17)
604 #define PSR_s (1 << 18)
605 #define PSR_f (1 << 19)
606
/* Mapping of a relocation operator name (e.g. "got") to its BFD reloc.  */
struct reloc_entry
{
  const char *		   name;
  bfd_reloc_code_real_type reloc;
};
612
/* Position of a VFP register field (Sd/Sm/Sn or Dd/Dm/Dn) within an
   instruction encoding.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};
618
/* Addressing variants of the VFP load/store-multiple instructions
   (increment-after / decrement-before, with X for the FLDMX/FSTMX
   extended forms).  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};
623
624 /* Bits for DEFINED field in neon_typed_alias. */
625 #define NTA_HASTYPE 1
626 #define NTA_HASINDEX 2
627
/* Extra information attached to a register alias created with .dn/.qn:
   which of a type and/or an element index were specified (DEFINED holds
   NTA_HASTYPE / NTA_HASINDEX bits), and their values.  */
struct neon_typed_alias
{
  unsigned char	       defined;
  unsigned char	       index;
  struct neon_type_el  eltype;
};
634
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* ARM core register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSD,		/* Neon single or double precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad register.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_MQ,		/* MVE vector register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB,		/* Banked register; message is supplied elsewhere.  */
  REG_TYPE_ZR		/* MVE ZR (zero) register.  */
};
666
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *		     name;	/* Register name as written in assembly.  */
  unsigned int		     number;	/* Encoding number for this register.  */
  unsigned char		     type;	/* An enum arm_reg_type value.  */
  unsigned char		     builtin;	/* Nonzero for predefined (non-alias) registers.  */
  struct neon_typed_alias *  neon;	/* See comment above; usually NULL.  */
};
679
680 /* Diagnostics used when we don't get a register of the expected type. */
681 const char * const reg_expected_msgs[] =
682 {
683 [REG_TYPE_RN] = N_("ARM register expected"),
684 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
685 [REG_TYPE_CN] = N_("co-processor register expected"),
686 [REG_TYPE_FN] = N_("FPA register expected"),
687 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
688 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
689 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
690 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
691 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
692 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
693 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
694 " expected"),
695 [REG_TYPE_VFC] = N_("VFP system register expected"),
696 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
697 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
698 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
699 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
700 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
701 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
702 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
703 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
704 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
705 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
706 [REG_TYPE_MQ] = N_("MVE vector register expected"),
707 [REG_TYPE_RNB] = N_("")
708 };
709
710 /* Some well known registers that we refer to directly elsewhere. */
711 #define REG_R12 12
712 #define REG_SP 13
713 #define REG_LR 14
714 #define REG_PC 15
715
716 /* ARM instructions take 4bytes in the object file, Thumb instructions
717 take 2: */
718 #define INSN_SIZE 4
719
/* One entry in the assembler's opcode table.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction: one operand-parser code per operand slot.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;	/* For the ARM encoding.  */
  const arm_feature_set * tvariant;	/* For the Thumb encoding.  */

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
750
751 /* Defines for various bits that we will want to toggle. */
752 #define INST_IMMEDIATE 0x02000000
753 #define OFFSET_REG 0x02000000
754 #define HWOFFSET_IMM 0x00400000
755 #define SHIFT_BY_REG 0x00000010
756 #define PRE_INDEX 0x01000000
757 #define INDEX_UP 0x00800000
758 #define WRITE_BACK 0x00200000
759 #define LDM_TYPE_2_OR_3 0x00400000
760 #define CPSI_MMOD 0x00020000
761
762 #define LITERAL_MASK 0xf000f000
763 #define OPCODE_MASK 0xfe1fffff
764 #define V4_STR_BIT 0x00000020
765 #define VLDR_VMOV_SAME 0x0040f000
766
767 #define T2_SUBS_PC_LR 0xf3de8f00
768
769 #define DATA_OP_SHIFT 21
770 #define SBIT_SHIFT 20
771
772 #define T2_OPCODE_MASK 0xfe1fffff
773 #define T2_DATA_OP_SHIFT 21
774 #define T2_SBIT_SHIFT 20
775
776 #define A_COND_MASK 0xf0000000
777 #define A_PUSH_POP_OP_MASK 0x0fff0000
778
779 /* Opcodes for pushing/poping registers to/from the stack. */
780 #define A1_OPCODE_PUSH 0x092d0000
781 #define A2_OPCODE_PUSH 0x052d0004
782 #define A2_OPCODE_POP 0x049d0004
783
784 /* Codes to distinguish the arithmetic instructions. */
785 #define OPCODE_AND 0
786 #define OPCODE_EOR 1
787 #define OPCODE_SUB 2
788 #define OPCODE_RSB 3
789 #define OPCODE_ADD 4
790 #define OPCODE_ADC 5
791 #define OPCODE_SBC 6
792 #define OPCODE_RSC 7
793 #define OPCODE_TST 8
794 #define OPCODE_TEQ 9
795 #define OPCODE_CMP 10
796 #define OPCODE_CMN 11
797 #define OPCODE_ORR 12
798 #define OPCODE_MOV 13
799 #define OPCODE_BIC 14
800 #define OPCODE_MVN 15
801
802 #define T2_OPCODE_AND 0
803 #define T2_OPCODE_BIC 1
804 #define T2_OPCODE_ORR 2
805 #define T2_OPCODE_ORN 3
806 #define T2_OPCODE_EOR 4
807 #define T2_OPCODE_ADD 8
808 #define T2_OPCODE_ADC 10
809 #define T2_OPCODE_SBC 11
810 #define T2_OPCODE_SUB 13
811 #define T2_OPCODE_RSB 14
812
813 #define T_OPCODE_MUL 0x4340
814 #define T_OPCODE_TST 0x4200
815 #define T_OPCODE_CMN 0x42c0
816 #define T_OPCODE_NEG 0x4240
817 #define T_OPCODE_MVN 0x43c0
818
819 #define T_OPCODE_ADD_R3 0x1800
820 #define T_OPCODE_SUB_R3 0x1a00
821 #define T_OPCODE_ADD_HI 0x4400
822 #define T_OPCODE_ADD_ST 0xb000
823 #define T_OPCODE_SUB_ST 0xb080
824 #define T_OPCODE_ADD_SP 0xa800
825 #define T_OPCODE_ADD_PC 0xa000
826 #define T_OPCODE_ADD_I8 0x3000
827 #define T_OPCODE_SUB_I8 0x3800
828 #define T_OPCODE_ADD_I3 0x1c00
829 #define T_OPCODE_SUB_I3 0x1e00
830
831 #define T_OPCODE_ASR_R 0x4100
832 #define T_OPCODE_LSL_R 0x4080
833 #define T_OPCODE_LSR_R 0x40c0
834 #define T_OPCODE_ROR_R 0x41c0
835 #define T_OPCODE_ASR_I 0x1000
836 #define T_OPCODE_LSL_I 0x0000
837 #define T_OPCODE_LSR_I 0x0800
838
839 #define T_OPCODE_MOV_I8 0x2000
840 #define T_OPCODE_CMP_I8 0x2800
841 #define T_OPCODE_CMP_LR 0x4280
842 #define T_OPCODE_MOV_HR 0x4600
843 #define T_OPCODE_CMP_HR 0x4500
844
845 #define T_OPCODE_LDR_PC 0x4800
846 #define T_OPCODE_LDR_SP 0x9800
847 #define T_OPCODE_STR_SP 0x9000
848 #define T_OPCODE_LDR_IW 0x6800
849 #define T_OPCODE_STR_IW 0x6000
850 #define T_OPCODE_LDR_IH 0x8800
851 #define T_OPCODE_STR_IH 0x8000
852 #define T_OPCODE_LDR_IB 0x7800
853 #define T_OPCODE_STR_IB 0x7000
854 #define T_OPCODE_LDR_RW 0x5800
855 #define T_OPCODE_STR_RW 0x5000
856 #define T_OPCODE_LDR_RH 0x5a00
857 #define T_OPCODE_STR_RH 0x5200
858 #define T_OPCODE_LDR_RB 0x5c00
859 #define T_OPCODE_STR_RB 0x5400
860
861 #define T_OPCODE_PUSH 0xb400
862 #define T_OPCODE_POP 0xbc00
863
864 #define T_OPCODE_BRANCH 0xe000
865
866 #define THUMB_SIZE 2 /* Size of thumb instruction. */
867 #define THUMB_PP_PC_LR 0x0100
868 #define THUMB_LOAD_BIT 0x0800
869 #define THUMB2_LOAD_BIT 0x00100000
870
871 #define BAD_SYNTAX _("syntax error")
872 #define BAD_ARGS _("bad arguments to instruction")
873 #define BAD_SP _("r13 not allowed here")
874 #define BAD_PC _("r15 not allowed here")
875 #define BAD_ODD _("Odd register not allowed here")
876 #define BAD_EVEN _("Even register not allowed here")
877 #define BAD_COND _("instruction cannot be conditional")
878 #define BAD_OVERLAP _("registers may not be the same")
879 #define BAD_HIREG _("lo register required")
880 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
881 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
882 #define BAD_BRANCH _("branch must be last instruction in IT block")
883 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
884 #define BAD_NOT_IT _("instruction not allowed in IT block")
885 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
886 #define BAD_FPU _("selected FPU does not support instruction")
887 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
888 #define BAD_OUT_VPT \
889 _("vector predicated instruction should be in VPT/VPST block")
890 #define BAD_IT_COND _("incorrect condition in IT block")
891 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
892 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
893 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
894 #define BAD_PC_ADDRESSING \
895 _("cannot use register index with PC-relative addressing")
896 #define BAD_PC_WRITEBACK \
897 _("cannot use writeback with PC-relative addressing")
898 #define BAD_RANGE _("branch out of range")
899 #define BAD_FP16 _("selected processor does not support fp16 instruction")
900 #define BAD_BF16 _("selected processor does not support bf16 instruction")
901 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
902 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
903 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
904 "block")
905 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
906 "block")
907 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
908 " operand")
909 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
910 " operand")
911 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
912 #define BAD_MVE_AUTO \
913 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
914 " use a valid -march or -mcpu option.")
915 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
916 "and source operands makes instruction UNPREDICTABLE")
917 #define BAD_EL_TYPE _("bad element type for instruction")
918 #define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
919
920 static struct hash_control * arm_ops_hsh;
921 static struct hash_control * arm_cond_hsh;
922 static struct hash_control * arm_vcond_hsh;
923 static struct hash_control * arm_shift_hsh;
924 static struct hash_control * arm_psr_hsh;
925 static struct hash_control * arm_v7m_psr_hsh;
926 static struct hash_control * arm_reg_hsh;
927 static struct hash_control * arm_reloc_hsh;
928 static struct hash_control * arm_barrier_opt_hsh;
929
930 /* Stuff needed to resolve the label ambiguity
931 As:
932 ...
933 label: <insn>
934 may differ from:
935 ...
936 label:
937 <insn> */
938
939 symbolS * last_label_seen;
940 static int label_is_thumb_function_name = FALSE;
941
942 /* Literal pool structure. Held on a per-section
943 and per-sub-section basis. */
944
/* Maximum number of constant entries a single literal pool can hold.  */
#define MAX_LITERAL_POOL_SIZE 1024
/* One literal pool: collects constants (e.g. from ldr= pseudo ops) for a
   given section/sub-section pair until the pool is dumped.  Pools are
   linked through NEXT into the global list_of_pools below.  */
typedef struct literal_pool
{
  /* The constants recorded so far, in order of arrival.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Number of slots of LITERALS currently in use.  */
  unsigned int	         next_free_entry;
  /* Ordinal of this pool, used to build its label name.  */
  unsigned int	         id;
  /* Symbol marking the location of the pool once it is emitted.  */
  symbolS *	         symbol;
  /* Section and sub-section this pool belongs to.  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source locations of the literals, for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the global list.  */
  struct literal_pool *  next;
  /* Required alignment of the pool.  NOTE(review): presumably a log2
     alignment as used by the .align machinery -- confirm at use site.  */
  unsigned int		 alignment;
} literal_pool;
960
961 /* Pointer to a linked list of literal pools. */
962 literal_pool * list_of_pools = NULL;
963
/* Parser state for the .asmfunc/.endasmfunc directives.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	  /* Not within an .asmfunc region.  */
  WAITING_ASMFUNC_NAME,	  /* .asmfunc seen; expecting the function name.  */
  WAITING_ENDASMFUNC	  /* Name seen; expecting .endasmfunc.  */
} asmfunc_states;
970
971 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
972
973 #ifdef OBJ_ELF
974 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
975 #else
976 static struct current_pred now_pred;
977 #endif
978
979 static inline int
980 now_pred_compatible (int cond)
981 {
982 return (cond & ~1) == (now_pred.cc & ~1);
983 }
984
/* Return non-zero if the instruction being assembled carries an explicit
   condition, i.e. its parsed condition field is not "always" (AL).  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
990
/* Forward declarations for the IT/VPT predication state machine.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record TYPE as the predication classification of the instruction being
   assembled and advance the predication state machine.  NOTE: expands to
   a bare `return' on failure, so this is only usable inside a function
   returning void.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, but for use in functions returning a value:
   FAILRET is returned when the state machine reports an error.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Classify the current instruction as the last one of an IT block,
   picking the classification according to whether it is conditional.
   Expands to a bare `return' on failure, like set_pred_insn_type.  */
#define set_pred_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)			\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else					\
	set_pred_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)

/* Toggle value[pos].  */
#define TOGGLE_BIT(value, pos) (value ^ (1 << pos))
1029
1030 /* Pure syntax. */
1031
1032 /* This array holds the chars that always start a comment. If the
1033 pre-processor is disabled, these aren't very useful. */
1034 char arm_comment_chars[] = "@";
1035
1036 /* This array holds the chars that only start a comment at the beginning of
1037 a line. If the line seems to have the form '# 123 filename'
1038 .line and .file directives will appear in the pre-processed output. */
1039 /* Note that input_file.c hand checks for '#' at the beginning of the
1040 first line of the input file. This is because the compiler outputs
1041 #NO_APP at the beginning of its output. */
1042 /* Also note that comments like this one will always work. */
1043 const char line_comment_chars[] = "#";
1044
1045 char arm_line_separator_chars[] = ";";
1046
1047 /* Chars that can be used to separate mant
1048 from exp in floating point numbers. */
1049 const char EXP_CHARS[] = "eE";
1050
1051 /* Chars that mean this number is a floating point constant. */
1052 /* As in 0f12.456 */
1053 /* or 0d1.2345e12 */
1054
1055 const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";
1056
1057 /* Prefix characters that indicate the start of an immediate
1058 value. */
1059 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1060
1061 /* Separator character handling. */
1062
1063 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1064
1065 enum fp_16bit_format
1066 {
1067 ARM_FP16_FORMAT_IEEE = 0x1,
1068 ARM_FP16_FORMAT_ALTERNATIVE = 0x2,
1069 ARM_FP16_FORMAT_DEFAULT = 0x3
1070 };
1071
1072 static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1073
1074
1075 static inline int
1076 skip_past_char (char ** str, char c)
1077 {
1078 /* PR gas/14987: Allow for whitespace before the expected character. */
1079 skip_whitespace (*str);
1080
1081 if (**str == c)
1082 {
1083 (*str)++;
1084 return SUCCESS;
1085 }
1086 else
1087 return FAIL;
1088 }
1089
1090 #define skip_past_comma(str) skip_past_char (str, ',')
1091
1092 /* Arithmetic expressions (possibly involving symbols). */
1093
1094 /* Return TRUE if anything in the expression is a bignum. */
1095
1096 static bfd_boolean
1097 walk_no_bignums (symbolS * sp)
1098 {
1099 if (symbol_get_value_expression (sp)->X_op == O_big)
1100 return TRUE;
1101
1102 if (symbol_get_value_expression (sp)->X_add_symbol)
1103 {
1104 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1105 || (symbol_get_value_expression (sp)->X_op_symbol
1106 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1107 }
1108
1109 return FALSE;
1110 }
1111
1112 static bfd_boolean in_my_get_expression = FALSE;
1113
1114 /* Third argument to my_get_expression. */
1115 #define GE_NO_PREFIX 0
1116 #define GE_IMM_PREFIX 1
1117 #define GE_OPT_PREFIX 2
1118 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1119 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1120 #define GE_OPT_PREFIX_BIG 3
1121
/* Parse an expression at *STR according to PREFIX_MODE (one of the GE_*
   values above).  On success return SUCCESS with *EP filled in and *STR
   advanced past the expression; on failure return non-zero with
   inst.error set (if it was not set already) and *STR at the point of
   the error.  Bignums are rejected unless GE_OPT_PREFIX_BIG.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression () parser works on input_line_pointer, so
     temporarily point it at the caller's string.  in_my_get_expression
     tells md_operand () below to flag bad operands as O_illegal rather
     than reporting them itself.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Restore the global parse pointer before returning.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1191
1192 /* Turn a string in input_line_pointer into a floating point constant
1193 of type TYPE, and store the appropriate bytes in *LITP. The number
1194 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1195 returned, or NULL on OK.
1196
1197 Note that fp constants aren't represent in the normal way on the ARM.
1198 In big endian mode, things are as expected. However, in little endian
1199 mode fp constants are big-endian word-wise, and little-endian byte-wise
1200 within the words. For example, (double) 1.1 in big endian mode is
1201 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1202 the byte sequence 99 99 f1 3f 9a 99 99 99.
1203
1204 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1205
/* Turn the text at input_line_pointer into a floating point constant of
   type TYPE (one of the FLT_CHARS above), store the bytes at *LITP and
   the byte count in *SIZEP.  Return NULL on success or an error string.
   See the comment above for the ARM's mixed-endian FP storage.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* Half precision: a single littlenum.  */
    case 'H':
    case 'h':
      prec = 1;
      break;

    /* If this is a bfloat16, then parse it slightly differently, as it
       does not follow the IEEE specification for floating point numbers
       exactly.  */
    case 'b':
      {
	FLONUM_TYPE generic_float;

	/* Parse with an 8-bit exponent and 1 littlenum of mantissa.  */
	t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

	if (t)
	  input_line_pointer = t;
	else
	  return _("invalid floating point number");

	switch (generic_float.sign)
	  {
	  /* Is +Inf.  */
	  case 'P':
	    words[0] = 0x7f80;
	    break;

	  /* Is -Inf.  */
	  case 'N':
	    words[0] = 0xff80;
	    break;

	  /* Is NaN.  */
	  /* bfloat16 has two types of NaN - quiet and signalling.
	     Quiet NaN has bit[6] == 1 && faction != 0, whereas
	     signalling NaN's have bit[0] == 0 && fraction != 0.
	     Chosen this specific encoding as it is the same form
	     as used by other IEEE 754 encodings in GAS.  */
	  case 0:
	    words[0] = 0x7fff;
	    break;

	  default:
	    break;
	  }

	/* bfloat16 constants are always exactly two bytes.  */
	*sizeP = 2;

	md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

	return NULL;
      }
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* Emit the littlenums in the order the target expects; a single
     littlenum (prec == 1) needs no reordering.  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    /* Pure little-endian FP: emit littlenums lowest first.  */
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1327
1328 /* We handle all bad expressions here, so that we can report the faulty
1329 instruction in the error message. */
1330
1331 void
1332 md_operand (expressionS * exp)
1333 {
1334 if (in_my_get_expression)
1335 exp->X_op = O_illegal;
1336 }
1337
1338 /* Immediate values. */
1339
1340 #ifdef OBJ_ELF
1341 /* Generic immediate-value read function for use in directives.
1342 Accepts anything that 'expression' can fold to a constant.
1343 *val receives the number. */
1344
1345 static int
1346 immediate_for_directive (int *val)
1347 {
1348 expressionS exp;
1349 exp.X_op = O_illegal;
1350
1351 if (is_immediate_prefix (*input_line_pointer))
1352 {
1353 input_line_pointer++;
1354 expression (&exp);
1355 }
1356
1357 if (exp.X_op != O_constant)
1358 {
1359 as_bad (_("expected #constant"));
1360 ignore_rest_of_line ();
1361 return FAIL;
1362 }
1363 *val = exp.X_add_number;
1364 return SUCCESS;
1365 }
1366 #endif
1367
1368 /* Register parsing. */
1369
1370 /* Generic register parser. CCP points to what should be the
1371 beginning of a register name. If it is indeed a valid register
1372 name, advance CCP over it and return the reg_entry structure;
1373 otherwise return NULL. Does not issue diagnostics. */
1374
1375 static struct reg_entry *
1376 arm_reg_parse_multi (char **ccp)
1377 {
1378 char *start = *ccp;
1379 char *p;
1380 struct reg_entry *reg;
1381
1382 skip_whitespace (start);
1383
1384 #ifdef REGISTER_PREFIX
1385 if (*start != REGISTER_PREFIX)
1386 return NULL;
1387 start++;
1388 #endif
1389 #ifdef OPTIONAL_REGISTER_PREFIX
1390 if (*start == OPTIONAL_REGISTER_PREFIX)
1391 start++;
1392 #endif
1393
1394 p = start;
1395 if (!ISALPHA (*p) || !is_name_beginner (*p))
1396 return NULL;
1397
1398 do
1399 p++;
1400 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1401
1402 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1403
1404 if (!reg)
1405 return NULL;
1406
1407 *ccp = p;
1408 return reg;
1409 }
1410
/* Handle the alternative spellings accepted for a few register classes
   when the parsed name did not match TYPE directly.  START is the start
   of the parsed text, REG the entry found (may be NULL) and *CCP the
   current parse position.  Return a register number or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1449
1450 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1451 return value is the register number or FAIL. */
1452
1453 static int
1454 arm_reg_parse (char **ccp, enum arm_reg_type type)
1455 {
1456 char *start = *ccp;
1457 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1458 int ret;
1459
1460 /* Do not allow a scalar (reg+index) to parse as a register. */
1461 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1462 return FAIL;
1463
1464 if (reg && reg->type == type)
1465 return reg->number;
1466
1467 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1468 return ret;
1469
1470 *ccp = start;
1471 return FAIL;
1472 }
1473
1474 /* Parse a Neon type specifier. *STR should point at the leading '.'
1475 character. Does no verification at this stage that the type fits the opcode
1476 properly. E.g.,
1477
1478 .i32.i32.s16
1479 .s32.f32
1480 .u16
1481
1482 Can all be legally parsed by this function.
1483
1484 Fills in neon_type struct pointer with parsed information, and updates STR
1485 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1486 type, FAIL if not. */
1487
1488 static int
1489 parse_neon_type (struct neon_type *type, char **str)
1490 {
1491 char *ptr = *str;
1492
1493 if (type)
1494 type->elems = 0;
1495
1496 while (type->elems < NEON_MAX_TYPE_ELS)
1497 {
1498 enum neon_el_type thistype = NT_untyped;
1499 unsigned thissize = -1u;
1500
1501 if (*ptr != '.')
1502 break;
1503
1504 ptr++;
1505
1506 /* Just a size without an explicit type. */
1507 if (ISDIGIT (*ptr))
1508 goto parsesize;
1509
1510 switch (TOLOWER (*ptr))
1511 {
1512 case 'i': thistype = NT_integer; break;
1513 case 'f': thistype = NT_float; break;
1514 case 'p': thistype = NT_poly; break;
1515 case 's': thistype = NT_signed; break;
1516 case 'u': thistype = NT_unsigned; break;
1517 case 'd':
1518 thistype = NT_float;
1519 thissize = 64;
1520 ptr++;
1521 goto done;
1522 case 'b':
1523 thistype = NT_bfloat;
1524 switch (TOLOWER (*(++ptr)))
1525 {
1526 case 'f':
1527 ptr += 1;
1528 thissize = strtoul (ptr, &ptr, 10);
1529 if (thissize != 16)
1530 {
1531 as_bad (_("bad size %d in type specifier"), thissize);
1532 return FAIL;
1533 }
1534 goto done;
1535 case '0': case '1': case '2': case '3': case '4':
1536 case '5': case '6': case '7': case '8': case '9':
1537 case ' ': case '.':
1538 as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1539 return FAIL;
1540 default:
1541 break;
1542 }
1543 break;
1544 default:
1545 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1546 return FAIL;
1547 }
1548
1549 ptr++;
1550
1551 /* .f is an abbreviation for .f32. */
1552 if (thistype == NT_float && !ISDIGIT (*ptr))
1553 thissize = 32;
1554 else
1555 {
1556 parsesize:
1557 thissize = strtoul (ptr, &ptr, 10);
1558
1559 if (thissize != 8 && thissize != 16 && thissize != 32
1560 && thissize != 64)
1561 {
1562 as_bad (_("bad size %d in type specifier"), thissize);
1563 return FAIL;
1564 }
1565 }
1566
1567 done:
1568 if (type)
1569 {
1570 type->el[type->elems].type = thistype;
1571 type->el[type->elems].size = thissize;
1572 type->elems++;
1573 }
1574 }
1575
1576 /* Empty/missing type is not a successful parse. */
1577 if (type->elems == 0)
1578 return FAIL;
1579
1580 *str = ptr;
1581
1582 return SUCCESS;
1583 }
1584
1585 /* Errors may be set multiple times during parsing or bit encoding
1586 (particularly in the Neon bits), but usually the earliest error which is set
1587 will be the most meaningful. Avoid overwriting it with later (cascading)
1588 errors by calling this function. */
1589
1590 static void
1591 first_error (const char *err)
1592 {
1593 if (!inst.error)
1594 inst.error = err;
1595 }
1596
1597 /* Parse a single type, e.g. ".s32", leading period included. */
1598 static int
1599 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1600 {
1601 char *str = *ccp;
1602 struct neon_type optype;
1603
1604 if (*str == '.')
1605 {
1606 if (parse_neon_type (&optype, &str) == SUCCESS)
1607 {
1608 if (optype.elems == 1)
1609 *vectype = optype.el[0];
1610 else
1611 {
1612 first_error (_("only one type should be specified for operand"));
1613 return FAIL;
1614 }
1615 }
1616 else
1617 {
1618 first_error (_("vector type expected"));
1619 return FAIL;
1620 }
1621 }
1622 else
1623 return FAIL;
1624
1625 *ccp = str;
1626
1627 return SUCCESS;
1628 }
1629
1630 /* Special meanings for indices (which have a range of 0-7), which will fit into
1631 a 4-bit integer. */
1632
1633 #define NEON_ALL_LANES 15
1634 #define NEON_INTERLEAVE_LANES 14
1635
1636 /* Record a use of the given feature. */
1637 static void
1638 record_feature_use (const arm_feature_set *feature)
1639 {
1640 if (thumb_mode)
1641 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1642 else
1643 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1644 }
1645
1646 /* If the given feature available in the selected CPU, mark it as used.
1647 Returns TRUE iff feature is available. */
1648 static bfd_boolean
1649 mark_feature_used (const arm_feature_set *feature)
1650 {
1651
1652 /* Do not support the use of MVE only instructions when in auto-detection or
1653 -march=all. */
1654 if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1655 && ARM_CPU_IS_ANY (cpu_variant))
1656 {
1657 first_error (BAD_MVE_AUTO);
1658 return FALSE;
1659 }
1660 /* Ensure the option is valid on the current architecture. */
1661 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1662 return FALSE;
1663
1664 /* Add the appropriate architecture feature for the barrier option used.
1665 */
1666 record_feature_use (feature);
1667
1668 return TRUE;
1669 }
1670
1671 /* Parse either a register or a scalar, with an optional type. Return the
1672 register number, and optionally fill in the actual type of the register
1673 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1674 type/index information in *TYPEINFO. */
1675
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Default: no type and no index parsed yet.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE Q registers reuse the Neon Q register names; only q0-q7 are
     valid unless the D32 feature is available.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is known non-NULL here (checked above), so the
	 "!reg" test is redundant.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Start from the builtin type/index info attached to the register,
     if any.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix augments (but may not override) it.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* "[index]" or "[]" turns the register into a scalar.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* An empty "[]" means all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1808
1809 /* Like arm_reg_parse, but also allow the following extra features:
1810 - If RTYPE is non-zero, return the (possibly restricted) type of the
1811 register (e.g. Neon double or quad reg when either has been requested).
1812 - If this is a Neon vector type with additional type information, fill
1813 in the struct pointed to by VECTYPE (if non-NULL).
1814 This function will fault on encountering a scalar. */
1815
1816 static int
1817 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1818 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1819 {
1820 struct neon_typed_alias atype;
1821 char *str = *ccp;
1822 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1823
1824 if (reg == FAIL)
1825 return FAIL;
1826
1827 /* Do not allow regname(... to parse as a register. */
1828 if (*str == '(')
1829 return FAIL;
1830
1831 /* Do not allow a scalar (reg+index) to parse as a register. */
1832 if ((atype.defined & NTA_HASINDEX) != 0)
1833 {
1834 first_error (_("register operand expected, but got scalar"));
1835 return FAIL;
1836 }
1837
1838 if (vectype)
1839 *vectype = atype.eltype;
1840
1841 *ccp = str;
1842
1843 return reg;
1844 }
1845
1846 #define NEON_SCALAR_REG(X) ((X) >> 4)
1847 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1848
1849 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1850 have enough information to be able to do a good job bounds-checking. So, we
1851 just do easy checks here, and do further checks later. */
1852
1853 static int
1854 parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1855 arm_reg_type reg_type)
1856 {
1857 int reg;
1858 char *str = *ccp;
1859 struct neon_typed_alias atype;
1860 unsigned reg_size;
1861
1862 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1863
1864 switch (reg_type)
1865 {
1866 case REG_TYPE_VFS:
1867 reg_size = 32;
1868 break;
1869 case REG_TYPE_VFD:
1870 reg_size = 64;
1871 break;
1872 case REG_TYPE_MQ:
1873 reg_size = 128;
1874 break;
1875 default:
1876 gas_assert (0);
1877 return FAIL;
1878 }
1879
1880 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1881 return FAIL;
1882
1883 if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1884 {
1885 first_error (_("scalar must have an index"));
1886 return FAIL;
1887 }
1888 else if (atype.index >= reg_size / elsize)
1889 {
1890 first_error (_("scalar index out of range"));
1891 return FAIL;
1892 }
1893
1894 if (type)
1895 *type = atype.eltype;
1896
1897 *ccp = str;
1898
1899 return reg * 16 + atype.index;
1900 }
1901
1902 /* Types of registers in a list. */
1903
enum reg_list_els
{
  REGLIST_RN,		/* Core registers (parse_reg_list).  */
  REGLIST_CLRM,		/* Core registers plus APSR, minus SP/PC (CLRM).  */
  REGLIST_VFP_S,	/* Single-precision VFP registers.  */
  REGLIST_VFP_S_VPR,	/* Single-precision VFP registers plus VPR.  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_VFP_D_VPR,	/* Double-precision VFP registers plus VPR.  */
  REGLIST_NEON_D	/* Neon D registers; Q names denote D pairs.  */
};
1914
1915 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1916
1917 static long
1918 parse_reg_list (char ** strp, enum reg_list_els etype)
1919 {
1920 char *str = *strp;
1921 long range = 0;
1922 int another_range;
1923
1924 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1925
1926 /* We come back here if we get ranges concatenated by '+' or '|'. */
1927 do
1928 {
1929 skip_whitespace (str);
1930
1931 another_range = 0;
1932
1933 if (*str == '{')
1934 {
1935 int in_range = 0;
1936 int cur_reg = -1;
1937
1938 str++;
1939 do
1940 {
1941 int reg;
1942 const char apsr_str[] = "apsr";
1943 int apsr_str_len = strlen (apsr_str);
1944
1945 reg = arm_reg_parse (&str, REGLIST_RN);
1946 if (etype == REGLIST_CLRM)
1947 {
1948 if (reg == REG_SP || reg == REG_PC)
1949 reg = FAIL;
1950 else if (reg == FAIL
1951 && !strncasecmp (str, apsr_str, apsr_str_len)
1952 && !ISALPHA (*(str + apsr_str_len)))
1953 {
1954 reg = 15;
1955 str += apsr_str_len;
1956 }
1957
1958 if (reg == FAIL)
1959 {
1960 first_error (_("r0-r12, lr or APSR expected"));
1961 return FAIL;
1962 }
1963 }
1964 else /* etype == REGLIST_RN. */
1965 {
1966 if (reg == FAIL)
1967 {
1968 first_error (_(reg_expected_msgs[REGLIST_RN]));
1969 return FAIL;
1970 }
1971 }
1972
1973 if (in_range)
1974 {
1975 int i;
1976
1977 if (reg <= cur_reg)
1978 {
1979 first_error (_("bad range in register list"));
1980 return FAIL;
1981 }
1982
1983 for (i = cur_reg + 1; i < reg; i++)
1984 {
1985 if (range & (1 << i))
1986 as_tsktsk
1987 (_("Warning: duplicated register (r%d) in register list"),
1988 i);
1989 else
1990 range |= 1 << i;
1991 }
1992 in_range = 0;
1993 }
1994
1995 if (range & (1 << reg))
1996 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1997 reg);
1998 else if (reg <= cur_reg)
1999 as_tsktsk (_("Warning: register range not in ascending order"));
2000
2001 range |= 1 << reg;
2002 cur_reg = reg;
2003 }
2004 while (skip_past_comma (&str) != FAIL
2005 || (in_range = 1, *str++ == '-'));
2006 str--;
2007
2008 if (skip_past_char (&str, '}') == FAIL)
2009 {
2010 first_error (_("missing `}'"));
2011 return FAIL;
2012 }
2013 }
2014 else if (etype == REGLIST_RN)
2015 {
2016 expressionS exp;
2017
2018 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
2019 return FAIL;
2020
2021 if (exp.X_op == O_constant)
2022 {
2023 if (exp.X_add_number
2024 != (exp.X_add_number & 0x0000ffff))
2025 {
2026 inst.error = _("invalid register mask");
2027 return FAIL;
2028 }
2029
2030 if ((range & exp.X_add_number) != 0)
2031 {
2032 int regno = range & exp.X_add_number;
2033
2034 regno &= -regno;
2035 regno = (1 << regno) - 1;
2036 as_tsktsk
2037 (_("Warning: duplicated register (r%d) in register list"),
2038 regno);
2039 }
2040
2041 range |= exp.X_add_number;
2042 }
2043 else
2044 {
2045 if (inst.relocs[0].type != 0)
2046 {
2047 inst.error = _("expression too complex");
2048 return FAIL;
2049 }
2050
2051 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2052 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2053 inst.relocs[0].pc_rel = 0;
2054 }
2055 }
2056
2057 if (*str == '|' || *str == '+')
2058 {
2059 str++;
2060 another_range = 1;
2061 }
2062 }
2063 while (another_range);
2064
2065 *strp = str;
2066 return range;
2067 }
2068
2069 /* Parse a VFP register list. If the string is invalid return FAIL.
2070 Otherwise return the number of registers, and set PBASE to the first
2071 register. Parses registers of type ETYPE.
2072 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2073 - Q registers can be used to specify pairs of D registers
2074 - { } can be omitted from around a singleton register list
2075 FIXME: This is not implemented, as it would require backtracking in
2076 some cases, e.g.:
2077 vtbl.8 d3,d4,d5
2078 This could be done (the meaning isn't really ambiguous), but doesn't
2079 fit in well with the current parsing framework.
2080 - 32 D registers may be used (also true for VFPv3).
2081 FIXME: Types are ignored in these register lists, which is probably a
2082 bug. */
2083
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;		/* Lowest register number seen so far.  */
  int new_base;		/* Register number just parsed.  */
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;	/* Architectural limit for this list kind.  */
  int count = 0;	/* Number of registers accumulated (Q counts as 2 D).  */
  int warned = 0;	/* Set once the ascending-order warning has fired.  */
  unsigned long mask = 0;  /* Bitmask of registers seen, bit N = reg N.  */
  int i;
  bfd_boolean vpr_seen = FALSE;
  /* The _VPR list kinds require a trailing VPR entry (MVE).  */
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Pick the register type and, for S registers, the register limit.
     The D-register limit depends on the CPU and is set below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above any valid register so the first comparison updates it.  */
  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      /* For Q registers two mask bits are set and two D regs counted.  */
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* Accept a single "vpr" token, which must be the last entry.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      /* At least one element parsed; lets callers give better errors.  */
      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      /* Duplicates are a hard error for VFP/Neon lists.  */
      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> parses as 2n; bump so the loop covers both D halves.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark and count every register inside the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* Step past the closing brace.  NOTE(review): the character is not
     actually checked to be '}' here -- presumably the comma loop can
     only stop there; confirm before relying on it.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2297
2298 /* True if two alias types are the same. */
2299
2300 static bfd_boolean
2301 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2302 {
2303 if (!a && !b)
2304 return TRUE;
2305
2306 if (!a || !b)
2307 return FALSE;
2308
2309 if (a->defined != b->defined)
2310 return FALSE;
2311
2312 if ((a->defined & NTA_HASTYPE) != 0
2313 && (a->eltype.type != b->eltype.type
2314 || a->eltype.size != b->eltype.size))
2315 return FALSE;
2316
2317 if ((a->defined & NTA_HASINDEX) != 0
2318 && (a->index != b->index))
2319 return FALSE;
2320
2321 return TRUE;
2322 }
2323
2324 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2325 The base register is put in *PBASE.
2326 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2327 the return value.
2328 The register stride (minus one) is put in bit 4 of the return value.
2329 Bits [6:5] encode the list length (minus one).
2330 The type of the list elements is put in *ELTYPE, if non-NULL. */
2331
2332 #define NEON_LANE(X) ((X) & 0xf)
2333 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2334 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2335
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register in the list; -1 = none yet.  */
  int reg_incr = -1;	/* Stride between registers; -1 = not fixed yet.  */
  int count = 0;	/* Number of D registers covered so far.  */
  int lane = -1;	/* Scalar index, or NEON_*_LANES; -1 = unset.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* MVE only permits unit stride, so its diagnostic differs.  */
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces are optional for a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element: remember base register and element type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element fixes the stride for the whole list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Every later element must continue the fixed stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count the registers in the range (Q registers are 2n).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Cannot mix indexed and non-indexed elements.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane into [3:0], stride-1 into bit 4, length-1 into [6:5];
     see the NEON_LANE/NEON_REG_STRIDE/NEON_REGLIST_LENGTH accessors.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2496
2497 /* Parse an explicit relocation suffix on an expression. This is
2498 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2499 arm_reloc_hsh contains no entries, so this function can only
2500 succeed if there is no () after the word. Returns -1 on error,
2501 BFD_RELOC_UNUSED if there wasn't any suffix. */
2502
2503 static int
2504 parse_reloc (char **str)
2505 {
2506 struct reloc_entry *r;
2507 char *p, *q;
2508
2509 if (**str != '(')
2510 return BFD_RELOC_UNUSED;
2511
2512 p = *str + 1;
2513 q = p;
2514
2515 while (*q && *q != ')' && *q != ',')
2516 q++;
2517 if (*q != ')')
2518 return -1;
2519
2520 if ((r = (struct reloc_entry *)
2521 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2522 return -1;
2523
2524 *str = q + 1;
2525 return r->reloc;
2526 }
2527
2528 /* Directives: register aliases. */
2529
2530 static struct reg_entry *
2531 insert_reg_alias (char *str, unsigned number, int type)
2532 {
2533 struct reg_entry *new_reg;
2534 const char *name;
2535
2536 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2537 {
2538 if (new_reg->builtin)
2539 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2540
2541 /* Only warn about a redefinition if it's not defined as the
2542 same register. */
2543 else if (new_reg->number != number || new_reg->type != type)
2544 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2545
2546 return NULL;
2547 }
2548
2549 name = xstrdup (str);
2550 new_reg = XNEW (struct reg_entry);
2551
2552 new_reg->name = name;
2553 new_reg->number = number;
2554 new_reg->type = type;
2555 new_reg->builtin = FALSE;
2556 new_reg->neon = NULL;
2557
2558 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2559 abort ();
2560
2561 return new_reg;
2562 }
2563
2564 static void
2565 insert_neon_reg_alias (char *str, int number, int type,
2566 struct neon_typed_alias *atype)
2567 {
2568 struct reg_entry *reg = insert_reg_alias (str, number, type);
2569
2570 if (!reg)
2571 {
2572 first_error (_("attempt to redefine typed alias"));
2573 return;
2574 }
2575
2576 if (atype)
2577 {
2578 reg->neon = XNEW (struct neon_typed_alias);
2579 *reg->neon = *atype;
2580 }
2581 }
2582
2583 /* Look for the .req directive. This is of the form:
2584
2585 new_register_name .req existing_register_name
2586
2587 If we find one, or if it looks sufficiently like one that we want to
2588 handle any error here, return TRUE. Otherwise return FALSE. */
2589
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name a known register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Convert nbuf in place; the exact-case alias above already
	 copied the name, so this is safe.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Only add the lower-case alias if it differs from the stated
	 spelling, to avoid a spurious redefinition warning.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2662
2663 /* Create a Neon typed/indexed register alias using directives, e.g.:
2664 X .dn d5.s32[1]
2665 Y .qn 6.s16
2666 Z .dn d7
2667 T .dn Z[0]
2668 These typed registers can be used instead of the types specified after the
2669 Neon mnemonic, so long as all operands given have types. Types can also be
2670 specified directly, e.g.:
2671 vadd d0.s32, d1.s32, d2.s32 */
2672
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Scratch entry for a numeric base.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* The directive decides whether this is a D or a Q alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q<n> is stored as register number 2n, matching D-pair layout.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit type/index from the base if it is itself a typed alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* Register the alias as spelled, plus upper- and lower-case forms
     (skipping either if it coincides with the stated spelling).  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2811
2812 /* Should never be called, as .req goes between the alias and the
2813 register name, not at the beginning of the line. */
2814
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" began a line; the valid form is
     "alias .req register", handled via create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
2820
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* ".dn" must follow an alias name (see create_neon_reg_alias);
     reaching this handler means it started the line.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2826
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* ".qn" must follow an alias name (see create_neon_reg_alias);
     reaching this handler means it started the line.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2832
2833 /* The .unreq directive deletes an alias which was previously defined
2834 by .req. For example:
2835
2836 my_alias .req r11
2837 .unreq my_alias */
2838
2839 static void
2840 s_unreq (int a ATTRIBUTE_UNUSED)
2841 {
2842 char * name;
2843 char saved_char;
2844
2845 name = input_line_pointer;
2846
2847 while (*input_line_pointer != 0
2848 && *input_line_pointer != ' '
2849 && *input_line_pointer != '\n')
2850 ++input_line_pointer;
2851
2852 saved_char = *input_line_pointer;
2853 *input_line_pointer = 0;
2854
2855 if (!*name)
2856 as_bad (_("invalid syntax for .unreq directive"));
2857 else
2858 {
2859 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2860 name);
2861
2862 if (!reg)
2863 as_bad (_("unknown register alias '%s'"), name);
2864 else if (reg->builtin)
2865 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2866 name);
2867 else
2868 {
2869 char * p;
2870 char * nbuf;
2871
2872 hash_delete (arm_reg_hsh, name, FALSE);
2873 free ((char *) reg->name);
2874 if (reg->neon)
2875 free (reg->neon);
2876 free (reg);
2877
2878 /* Also locate the all upper case and all lower case versions.
2879 Do not complain if we cannot find one or the other as it
2880 was probably deleted above. */
2881
2882 nbuf = strdup (name);
2883 for (p = nbuf; *p; p++)
2884 *p = TOUPPER (*p);
2885 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2886 if (reg)
2887 {
2888 hash_delete (arm_reg_hsh, nbuf, FALSE);
2889 free ((char *) reg->name);
2890 if (reg->neon)
2891 free (reg->neon);
2892 free (reg);
2893 }
2894
2895 for (p = nbuf; *p; p++)
2896 *p = TOLOWER (*p);
2897 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2898 if (reg)
2899 {
2900 hash_delete (arm_reg_hsh, nbuf, FALSE);
2901 free ((char *) reg->name);
2902 if (reg->neon)
2903 free (reg->neon);
2904 free (reg);
2905 }
2906
2907 free (nbuf);
2908 }
2909 }
2910
2911 *input_line_pointer = saved_char;
2912 demand_empty_rest_of_line ();
2913 }
2914
2915 /* Directives: Instruction set selection. */
2916
2917 #ifdef OBJ_ELF
2918 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2919 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2920 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2921 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2922
2923 /* Create a new mapping symbol for the transition to STATE. */
2924
/* Emit a mapping symbol ($a, $t or $d) for the transition to STATE at
   offset VALUE within FRAG, and record it in the frag's first/last
   mapping-symbol bookkeeping.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping symbol name for this state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark $a/$t symbols with the matching ARM/Thumb annotations.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous symbol: the new one supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2998
2999 /* We must sometimes convert a region marked as code to data during
3000 code alignment, if an odd number of bytes have to be padded. The
3001 code mapping symbol is pushed to an aligned address. */
3002
/* Mark BYTES bytes of padding at offset VALUE in FRAG as data, then
   restore the code state STATE after the padding.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* Keep first_map consistent if we are deleting the frag's
	 first (offset-zero) mapping symbol.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the start of the padding, then $a/$t right after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
3025
3026 static void mapping_state_2 (enum mstate state, int max_chars);
3027
3028 /* Set the mapping state to STATE. Only call this when about to
3029 emit some STATE bytes to the file. */
3030
3031 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current output position.  */
  mapping_state_2 (state, 0);
}
3064
3065 /* Same as mapping_state, but MAX_CHARS bytes have already been
3066 allocated. Put the mapping symbol that far back. */
3067
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary output sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* If anything precedes the first instruction in this section,
	 mark that prefix as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Back up over the MAX_CHARS bytes the caller already allocated.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3094 #undef TRANSITION
3095 #else
3096 #define mapping_state(x) ((void)0)
3097 #define mapping_state_2(x, y) ((void)0)
3098 #endif
3099
3100 /* Find the real, Thumb encoded start of a Thumb function. */
3101
3102 #ifdef OBJ_COFF
3103 static symbolS *
3104 find_real_start (symbolS * symbolP)
3105 {
3106 char * real_start;
3107 const char * name = S_GET_NAME (symbolP);
3108 symbolS * new_target;
3109
3110 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
3111 #define STUB_NAME ".real_start_of"
3112
3113 if (name == NULL)
3114 abort ();
3115
3116 /* The compiler may generate BL instructions to local labels because
3117 it needs to perform a branch to a far away location. These labels
3118 do not have a corresponding ".real_start_of" label. We check
3119 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3120 the ".real_start_of" convention for nonlocal branches. */
3121 if (S_IS_LOCAL (symbolP) || name[0] == '.')
3122 return symbolP;
3123
3124 real_start = concat (STUB_NAME, name, NULL);
3125 new_target = symbol_find (real_start);
3126 free (real_start);
3127
3128 if (new_target == NULL)
3129 {
3130 as_warn (_("Failed to find real start of function: %s\n"), name);
3131 new_target = symbolP;
3132 }
3133
3134 return new_target;
3135 }
3136 #endif
3137
3138 static void
3139 opcode_select (int width)
3140 {
3141 switch (width)
3142 {
3143 case 16:
3144 if (! thumb_mode)
3145 {
3146 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3147 as_bad (_("selected processor does not support THUMB opcodes"));
3148
3149 thumb_mode = 1;
3150 /* No need to force the alignment, since we will have been
3151 coming from ARM mode, which is word-aligned. */
3152 record_alignment (now_seg, 1);
3153 }
3154 break;
3155
3156 case 32:
3157 if (thumb_mode)
3158 {
3159 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3160 as_bad (_("selected processor does not support ARM opcodes"));
3161
3162 thumb_mode = 0;
3163
3164 if (!need_pass_2)
3165 frag_align (2, 0, 0);
3166
3167 record_alignment (now_seg, 1);
3168 }
3169 break;
3170
3171 default:
3172 as_bad (_("invalid instruction size selected (%d)"), width);
3173 }
3174 }
3175
/* Handle the .arm directive: select 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3182
/* Handle the .thumb directive: select 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3189
3190 static void
3191 s_code (int unused ATTRIBUTE_UNUSED)
3192 {
3193 int temp;
3194
3195 temp = get_absolute_expression ();
3196 switch (temp)
3197 {
3198 case 16:
3199 case 32:
3200 opcode_select (temp);
3201 break;
3202
3203 default:
3204 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3205 }
3206 }
3207
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 marks "forced" Thumb mode, as opposed to 1 for a normal
	 .thumb/.code 16 selection.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3224
/* Handle the .thumb_func directive: switch to Thumb encoding and flag
   the next label as the start of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3234
3235 /* Perform a .set directive, but also mark the alias as
3236 being a thumb function. */
3237
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  /* A comma must separate the symbol name from its value.  */
  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate NAME so it prints cleanly.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter that terminated NAME.  */
  * end_name = delim;

  /* With EQUIV set (.eqv-style), redefinition is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value and assign it, as read.c's s_set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3323
3324 /* Directives: Mode selection. */
3325
3326 /* .syntax [unified|divided] - choose the new unified syntax
3327 (same for Arm and Thumb encoding, modulo slight differences in what
3328 can be represented) or the old divergent syntax for each mode. */
3329 static void
3330 s_syntax (int unused ATTRIBUTE_UNUSED)
3331 {
3332 char *name, delim;
3333
3334 delim = get_symbol_name (& name);
3335
3336 if (!strcasecmp (name, "unified"))
3337 unified_syntax = TRUE;
3338 else if (!strcasecmp (name, "divided"))
3339 unified_syntax = FALSE;
3340 else
3341 {
3342 as_bad (_("unrecognized syntax mode \"%s\""), name);
3343 return;
3344 }
3345 (void) restore_line_pointer (delim);
3346 demand_empty_rest_of_line ();
3347 }
3348
3349 /* Directives: sectioning and alignment. */
3350
/* Handle the .bss directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3363
/* Handle the .even directive: align output to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the alignment even when no frag was emitted.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3375
3376 /* Directives: CodeComposer Studio. */
3377
3378 /* .ref (for CodeComposer Studio syntax only). */
3379 static void
3380 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3381 {
3382 if (codecomposer_syntax)
3383 ignore_rest_of_line ();
3384 else
3385 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3386 }
3387
3388 /* If name is not NULL, then it is used for marking the beginning of a
3389 function, whereas if it is NULL then it means the function end. */
3390 static void
3391 asmfunc_debug (const char * name)
3392 {
3393 static const char * last_name = NULL;
3394
3395 if (name != NULL)
3396 {
3397 gas_assert (last_name == NULL);
3398 last_name = name;
3399
3400 if (debug_type == DEBUG_STABS)
3401 stabs_generate_asm_func (name, name);
3402 }
3403 else
3404 {
3405 gas_assert (last_name != NULL);
3406
3407 if (debug_type == DEBUG_STABS)
3408 stabs_generate_asm_endfunc (last_name, last_name);
3409
3410 last_name = NULL;
3411 }
3412 }
3413
/* Handle the CodeComposer Studio .asmfunc directive: arm the state
   machine so that the next label is treated as an asm function name.  */

static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  /* Two .asmfunc in a row without an intervening label.  */
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  /* Previous .asmfunc has not been closed by .endasmfunc.  */
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3438
/* Handle the CodeComposer Studio .endasmfunc directive: close the
   function opened by the preceding .asmfunc/label pair.  */

static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  /* .asmfunc seen but its name label never appeared.  */
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  /* NULL signals "function end" to asmfunc_debug.  */
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3464
3465 static void
3466 s_ccs_def (int name)
3467 {
3468 if (codecomposer_syntax)
3469 s_globl (name);
3470 else
3471 as_bad (_(".def pseudo-op only available with -mccs flag."));
3472 }
3473
3474 /* Directives: Literal pools. */
3475
3476 static literal_pool *
3477 find_literal_pool (void)
3478 {
3479 literal_pool * pool;
3480
3481 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3482 {
3483 if (pool->section == now_seg
3484 && pool->sub_section == now_subseg)
3485 break;
3486 }
3487
3488 return pool;
3489 }
3490
3491 static literal_pool *
3492 find_or_make_literal_pool (void)
3493 {
3494 /* Next literal pool ID number. */
3495 static unsigned int latest_pool_num = 1;
3496 literal_pool * pool;
3497
3498 pool = find_literal_pool ();
3499
3500 if (pool == NULL)
3501 {
3502 /* Create a new pool. */
3503 pool = XNEW (literal_pool);
3504 if (! pool)
3505 return NULL;
3506
3507 pool->next_free_entry = 0;
3508 pool->section = now_seg;
3509 pool->sub_section = now_subseg;
3510 pool->next = list_of_pools;
3511 pool->symbol = NULL;
3512 pool->alignment = 2;
3513
3514 /* Add it to the list. */
3515 list_of_pools = pool;
3516 }
3517
3518 /* New pools, and emptied pools, will have a NULL symbol. */
3519 if (pool->symbol == NULL)
3520 {
3521 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3522 (valueT) 0, &zero_address_frag);
3523 pool->id = latest_pool_num ++;
3524 }
3525
3526 /* Done. */
3527 return pool;
3528 }
3529
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is 4 or 8.  Reuses an
   existing identical entry when possible; on success rewrites
   inst.relocs[0].exp to reference the pool symbol plus the entry's byte
   offset.  Returns SUCCESS or FAIL (setting inst.error).  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split the 64-bit value into two 32-bit halves, ordered to match
	 the target endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match against an existing constant entry...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A 4-byte literal may reuse a padding slot left by an earlier
	 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: insert a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the value as two consecutive 4-byte constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite the padding slot with this literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Redirect the instruction's operand at the pool entry.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3694
/* Called when a label is given without a trailing colon.  Under
   CodeComposer syntax, if a .asmfunc is waiting for its name, record
   this label as the function start.  Returns FALSE if the label is
   rejected.  */

bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* Back up to the start of the label on this line.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3720
3721 /* Can't use symbol_new here, so have to create a symbol and then at
3722 a later date assign it a value. That's what these functions do. */
3723
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3771
/* Handle the .ltorg directive: emit the current section's literal pool
   at this point and mark the pool empty so it can be refilled.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data: switch the mapping state and emit a $d symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte makes the name unspellable in source.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3823
3824 #ifdef OBJ_ELF
3825 /* Forward declarations for functions below, in the MD interface
3826 section. */
3827 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3828 static valueT create_unwind_entry (int);
3829 static void start_unwind_section (const segT, int);
3830 static void add_unwind_opcode (valueT, int);
3831 static void flush_pending_unwind (void);
3832
3833 /* Directives: Data. */
3834
/* Handle a data directive emitting NBYTES per expression.  Like cons,
   but a symbolic operand may carry a relocation suffix, which is parsed
   by parse_reloc and applied via fix_new_exp.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		bfd_reloc_type_lookup (stdoutput,
				       (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the operand text, splice out the relocation
		     marker, re-parse the whole expression, then restore
		     the input buffer.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3929
3930 /* Emit an expression containing a 32-bit thumb instruction.
3931 Implementation based on put_thumb32_insn. */
3932
3933 static void
3934 emit_thumb32_expr (expressionS * exp)
3935 {
3936 expressionS exp_high = *exp;
3937
3938 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3939 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3940 exp->X_add_number &= 0xffff;
3941 emit_expr (exp, (unsigned int) THUMB_SIZE);
3942 }
3943
3944 /* Guess the instruction size based on the opcode. */
3945
static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  /* Values below 0xe800 encode 16-bit instructions; values with a
     32-bit-sized top halfword (>= 0xe8000000) encode 32-bit
     instructions; anything in between is ambiguous.  */
  if (uop < 0xe800u)
    return 2;
  if (uop >= 0xe8000000u)
    return 4;
  return 0;
}
3956
/* Emit the constant expression EXP as one instruction of NBYTES
   (0 means deduce the Thumb size from the opcode value).  Returns
   TRUE if an instruction was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/prediction-block state machine in step
		 with the hand-emitted instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions on little-endian targets are
		 stored as two halfwords, high halfword first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4001
4002 /* Like s_arm_elf_cons but do not use md_cons_align and
4003 set the mapping state to MAP_ARM/MAP_THUMB. */
4004
static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* In ARM mode all instructions are 4 bytes; .inst.n/.inst.w make
	 no sense.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit one instruction per comma-separated expression.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
4051
4052 /* Parse a .rel31 directive. */
4053
static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand (0 or 1) supplies the top bit of the emitted word.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit a 4-byte word holding the high bit; the low 31 bits are filled
     in by the PREL31 relocation.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
4091
4092 /* Directives: AEABI stack-unwind tables. */
4093
4094 /* Parse an unwind_fnstart directive. Simply records the current location. */
4095
static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Nested/duplicate .fnstart is an error; the open one must be closed
     by .fnend first.  */
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
4120
4121
4122 /* Parse a handlerdata directive. Creates the exception handling table entry
4123 for the function. */
4124
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* create_unwind_entry sets unwind.table_entry, so a second
     .handlerdata for the same function is caught here.  */
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
}
4137
4138 /* Parse an unwind_fnend directive. Generates the index table entry. */
4139
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size BFD_RELOC_NONE fix records the reference without
	 emitting any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4207
4208
4209 /* Parse an unwind_cantunwind directive. */
4210
static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 marks the frame as EXIDX_CANTUNWIND.  */
  unwind.personality_index = -2;
}
4223
4224
4225 /* Parse a personalityindex directive. */
4226
static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* Indices are constrained to the range [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
4252
4253
4254 /* Parse a personality directive. */
4255
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the character clobbered by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4275
4276
4277 /* Parse a directive saving core registers. */
4278
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;		/* Bitmask of saved core registers r0-r15.  */
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4352
4353
4354 /* Parse a directive saving FPA registers. */
4355
static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers are 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
4401
4402
4403 /* Parse a directive saving VFP registers for ARMv6 and above. */
4404
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode operand is the offset of the first register from d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* D registers are 8 bytes each.  */
  unwind.frame_size += count * 8;
}
4453
4454
4455 /* Parse a directive saving VFP registers for pre-ARMv6. */
4456
static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* D registers are 8 bytes each, plus 4 bytes (FSTMX-style save
     includes an extra word).  */
  unwind.frame_size += count * 8 + 4;
}
4490
4491
4492 /* Parse a directive saving iWMMXt data registers. */
4493
static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;		/* Bit N set => wRN is in the list.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse a comma-separated list of wR registers and ranges into MASK.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* A lower-numbered register already present means the list is
	 out of order — warn but continue.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Range syntax wRn-wRm.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved wR register is eight bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  /* Previous opcode was a short-form wR save (0xc0-0xc7,
	     covering wR10..wR10+n).  */
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Merge only when the new list is exactly {wR9}, which
		 abuts the previous wR10.. block.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous save was long-form (two bytes); recover its
		 start register and count from the earlier byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      /* Merge only when the new list ends exactly at the
		 register just below the previous block's start.  */
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.   */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4625
/* Parse a directive saving iWMMXt control registers (wCGR).  Emits a
   single 0xc7 opcode whose low nibble-mask names the saved registers.  */
static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;		/* Bit N set => wCGR(8+N) is in the list.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wCGR register numbers are biased by 8; rebase to bit 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Range syntax.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved control register is four bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4693
4694
4695 /* Parse an unwind_save directive.
4696 If the argument is non-zero, this is a .vsave directive. */
4697
static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  Peek at the first register
     name without consuming input; the per-kind handlers below re-parse
     the full list themselves.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  /* Dispatch on the register class of the first operand.  */
  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* FPA form takes a single register, already parsed: commit the
	 peeked position.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      /* .vsave (arch_v6 != 0) uses the FSTMD-style encoding.  */
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4763
4764
4765 /* Parse an unwind_movsp directive. */
4766
static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* .movsp is only meaningful while SP is still the frame base; a prior
     .setfp/.movsp would already have changed unwind.fp_reg.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  0x90|reg encodes
     "set vsp = r<reg>".  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4814
4815 /* Parse an unwind_pad directive. */
4816
4817 static void
4818 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4819 {
4820 int offset;
4821
4822 if (!unwind.proc_start)
4823 as_bad (MISSING_FNSTART);
4824
4825 if (immediate_for_directive (&offset) == FAIL)
4826 return;
4827
4828 if (offset & 3)
4829 {
4830 as_bad (_("stack increment must be multiple of 4"));
4831 ignore_rest_of_line ();
4832 return;
4833 }
4834
4835 /* Don't generate any opcodes, just record the details for later. */
4836 unwind.frame_size += offset;
4837 unwind.pending_offset += offset;
4838
4839 demand_empty_rest_of_line ();
4840 }
4841
4842 /* Parse an unwind_setfp directive. */
4843
4844 static void
4845 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4846 {
4847 int sp_reg;
4848 int fp_reg;
4849 int offset;
4850
4851 if (!unwind.proc_start)
4852 as_bad (MISSING_FNSTART);
4853
4854 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4855 if (skip_past_comma (&input_line_pointer) == FAIL)
4856 sp_reg = FAIL;
4857 else
4858 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4859
4860 if (fp_reg == FAIL || sp_reg == FAIL)
4861 {
4862 as_bad (_("expected <reg>, <reg>"));
4863 ignore_rest_of_line ();
4864 return;
4865 }
4866
4867 /* Optional constant. */
4868 if (skip_past_comma (&input_line_pointer) != FAIL)
4869 {
4870 if (immediate_for_directive (&offset) == FAIL)
4871 return;
4872 }
4873 else
4874 offset = 0;
4875
4876 demand_empty_rest_of_line ();
4877
4878 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4879 {
4880 as_bad (_("register must be either sp or set by a previous"
4881 "unwind_movsp directive"));
4882 return;
4883 }
4884
4885 /* Don't generate any opcodes, just record the information for later. */
4886 unwind.fp_reg = fp_reg;
4887 unwind.fp_used = 1;
4888 if (sp_reg == REG_SP)
4889 unwind.fp_offset = unwind.frame_size - offset;
4890 else
4891 unwind.fp_offset -= offset;
4892 }
4893
4894 /* Parse an unwind_raw directive. */
4895
4896 static void
4897 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4898 {
4899 expressionS exp;
4900 /* This is an arbitrary limit. */
4901 unsigned char op[16];
4902 int count;
4903
4904 if (!unwind.proc_start)
4905 as_bad (MISSING_FNSTART);
4906
4907 expression (&exp);
4908 if (exp.X_op == O_constant
4909 && skip_past_comma (&input_line_pointer) != FAIL)
4910 {
4911 unwind.frame_size += exp.X_add_number;
4912 expression (&exp);
4913 }
4914 else
4915 exp.X_op = O_illegal;
4916
4917 if (exp.X_op != O_constant)
4918 {
4919 as_bad (_("expected <offset>, <opcode>"));
4920 ignore_rest_of_line ();
4921 return;
4922 }
4923
4924 count = 0;
4925
4926 /* Parse the opcode. */
4927 for (;;)
4928 {
4929 if (count >= 16)
4930 {
4931 as_bad (_("unwind opcode too long"));
4932 ignore_rest_of_line ();
4933 }
4934 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4935 {
4936 as_bad (_("invalid unwind opcode"));
4937 ignore_rest_of_line ();
4938 return;
4939 }
4940 op[count++] = exp.X_add_number;
4941
4942 /* Parse the next byte. */
4943 if (skip_past_comma (&input_line_pointer) == FAIL)
4944 break;
4945
4946 expression (&exp);
4947 }
4948
4949 /* Add the opcode bytes in reverse order. */
4950 while (count--)
4951 add_unwind_opcode (op[count], 1);
4952
4953 demand_empty_rest_of_line ();
4954 }
4955
4956
4957 /* Parse a .eabi_attribute directive. */
4958
4959 static void
4960 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4961 {
4962 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4963
4964 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4965 attributes_set_explicitly[tag] = 1;
4966 }
4967
4968 /* Emit a tls fix for the symbol. */
4969
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Attach a 4-byte TLS-descriptor-sequence fixup at the current output
     position; the relocation variant depends on the current instruction
     set (Thumb vs ARM).  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4991 #endif /* OBJ_ELF */
4992
4993 static void s_arm_arch (int);
4994 static void s_arm_object_arch (int);
4995 static void s_arm_cpu (int);
4996 static void s_arm_fpu (int);
4997 static void s_arm_arch_extension (int);
4998
4999 #ifdef TE_PE
5000
5001 static void
5002 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5003 {
5004 expressionS exp;
5005
5006 do
5007 {
5008 expression (&exp);
5009 if (exp.X_op == O_symbol)
5010 exp.X_op = O_secrel;
5011
5012 emit_expr (&exp, 4);
5013 }
5014 while (*input_line_pointer++ == ',');
5015
5016 input_line_pointer--;
5017 demand_empty_rest_of_line ();
5018 }
5019 #endif /* TE_PE */
5020
5021 int
5022 arm_is_largest_exponent_ok (int precision)
5023 {
5024 /* precision == 1 ensures that this will only return
5025 true for 16 bit floats. */
5026 return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5027 }
5028
5029 static void
5030 set_fp16_format (int dummy ATTRIBUTE_UNUSED)
5031 {
5032 char saved_char;
5033 char* name;
5034 enum fp_16bit_format new_format;
5035
5036 new_format = ARM_FP16_FORMAT_DEFAULT;
5037
5038 name = input_line_pointer;
5039 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
5040 input_line_pointer++;
5041
5042 saved_char = *input_line_pointer;
5043 *input_line_pointer = 0;
5044
5045 if (strcasecmp (name, "ieee") == 0)
5046 new_format = ARM_FP16_FORMAT_IEEE;
5047 else if (strcasecmp (name, "alternative") == 0)
5048 new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5049 else
5050 {
5051 as_bad (_("unrecognised float16 format \"%s\""), name);
5052 goto cleanup;
5053 }
5054
5055 /* Only set fp16_format if it is still the default (aka not already
5056 been set yet). */
5057 if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5058 fp16_format = new_format;
5059 else
5060 {
5061 if (new_format != fp16_format)
5062 as_warn (_("float16 format cannot be set more than once, ignoring."));
5063 }
5064
5065 cleanup:
5066 *input_line_pointer = saved_char;
5067 ignore_rest_of_line ();
5068 }
5069
5070 /* This table describes all the machine specific pseudo-ops the assembler
5071 has to support. The fields are:
5072 pseudo-op name without dot
5073 function to call to execute this pseudo-op
5074 Integer arg to pass to the function. */
5075
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align_ptwo,  2 },
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",             s_arm_elf_cons, 4 },
  { "long",             s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",            s_arm_rel31,    0 },
  /* ARM EHABI unwind-table directives.  The .save/.vsave pair share a
     handler; the argument distinguishes them.  */
  { "fnstart",          s_arm_unwind_fnstart,   0 },
  { "fnend",            s_arm_unwind_fnend,     0 },
  { "cantunwind",       s_arm_unwind_cantunwind, 0 },
  { "personality",      s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",      s_arm_unwind_handlerdata, 0 },
  { "save",             s_arm_unwind_save,      0 },
  { "vsave",            s_arm_unwind_save,      1 },
  { "movsp",            s_arm_unwind_movsp,     0 },
  { "pad",              s_arm_unwind_pad,       0 },
  { "setfp",            s_arm_unwind_setfp,     0 },
  { "unwind_raw",       s_arm_unwind_raw,       0 },
  { "eabi_attribute",   s_arm_eabi_attribute,   0 },
  { "tlsdescseq",       s_arm_tls_descseq,      0 },
#else
  { "word",        cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",      float_cons, 'x' },
  { "ldouble",     float_cons, 'x' },
  { "packed",      float_cons, 'p' },
  { "bfloat16",    float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5153
5154 /* Parser functions used exclusively in instruction operands. */
5155
5156 /* Generic immediate-value read function for use in insn parsing.
5157 STR points to the beginning of the immediate (the leading #);
5158 VAL receives the value; if the value is outside [MIN, MAX]
5159 issue an error. PREFIX_OPT is true if the immediate prefix is
5160 optional. */
5161
5162 static int
5163 parse_immediate (char **str, int *val, int min, int max,
5164 bfd_boolean prefix_opt)
5165 {
5166 expressionS exp;
5167
5168 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5169 if (exp.X_op != O_constant)
5170 {
5171 inst.error = _("constant expression required");
5172 return FAIL;
5173 }
5174
5175 if (exp.X_add_number < min || exp.X_add_number > max)
5176 {
5177 inst.error = _("immediate value out of range");
5178 return FAIL;
5179 }
5180
5181 *val = exp.X_add_number;
5182 return SUCCESS;
5183 }
5184
5185 /* Less-generic immediate-value read function with the possibility of loading a
5186 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5187 instructions. Puts the result directly in inst.operands[i]. */
5188
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm; any high 32 bits go in .reg.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5257
5258 /* Returns the pseudo-register number of an FPA immediate constant,
5259 or FAIL if there isn't a valid constant here. */
5260
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediates occupy pseudo-register numbers 8..15.  */
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare against each of the supported constant values.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the saved input position before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5350
5351 /* Returns 1 if a number has "quarter-precision" float format
5352 0baBbbbbbc defgh000 00000000 00000000. */
5353
/* Return non-zero if IMM, viewed as an IEEE single-precision bit
   pattern, has the "quarter-precision" form
   0baBbbbbbc defgh000 00000000 00000000 — i.e. the low 19 bits are
   clear and bits 30..25 are either 0 11111 (B set pattern) or
   1 00000 (B clear pattern).  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected = (imm & 0x20000000) ? 0x3e000000u : 0x40000000u;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
5360
5361
5362 /* Detect the presence of a floating point or integer zero constant,
5363 i.e. #0.0 or #0. */
5364
5365 static bfd_boolean
5366 parse_ifimm_zero (char **in)
5367 {
5368 int error_code;
5369
5370 if (!is_immediate_prefix (**in))
5371 {
5372 /* In unified syntax, all prefixes are optional. */
5373 if (!unified_syntax)
5374 return FALSE;
5375 }
5376 else
5377 ++*in;
5378
5379 /* Accept #0x0 as a synonym for #0. */
5380 if (strncmp (*in, "0x", 2) == 0)
5381 {
5382 int val;
5383 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
5384 return FALSE;
5385 return TRUE;
5386 }
5387
5388 error_code = atof_generic (in, ".", EXP_CHARS,
5389 &generic_floating_point_number);
5390
5391 if (!error_code
5392 && generic_floating_point_number.sign == '+'
5393 && (generic_floating_point_number.low
5394 > generic_floating_point_number.leader))
5395 return TRUE;
5396
5397 return FALSE;
5398 }
5399
5400 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5401 0baBbbbbbc defgh000 00000000 00000000.
5402 The zero and minus-zero cases need special handling, since they can't be
5403 encoded in the "quarter-precision" float format, but can nonetheless be
5404 loaded as integer constants. */
5405
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision patterns, and also +/-0.0 which cannot
	 be encoded as a quarter float but can be loaded as an integer.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5463
/* Shift operands.  SHIFT_RRX is rotate-right-with-extend (encoded as
   ROR #0); SHIFT_UXTW is only valid in specific addressing forms.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};
5469
/* Entry in the shift-name hash table (arm_shift_hsh): maps a mnemonic
   such as "lsl" or "asr" to its shift kind.  */
struct asm_shift_name
{
  const char *name;		/* Shift mnemonic as written in source.  */
  enum shift_kind kind;		/* Corresponding shift operation.  */
};
5475
/* Third argument to parse_shift: restricts which shift kinds and
   operand forms are accepted for a given instruction operand.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5486
5487 /* Parse a <shift> specifier on an ARM data processing instruction.
5488 This has three forms:
5489
5490 (LSL|LSR|ASL|ASR|ROR) Rs
5491 (LSL|LSR|ASL|ASR|ROR) #imm
5492 RRX
5493
5494 Note that ASL is assimilated to LSL in the instruction encoding, and
5495 RRX to ROR #0 (which cannot be written as such). */
5496
static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* The shift mnemonic is the leading run of alphabetic characters.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restrictions MODE places on the shift kind.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no amount; everything else is followed by a register
     (NO_SHIFT_RESTRICT only) or an immediate expression.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5590
5591 /* Parse a <shifter_operand> for an ARM data processing instruction:
5592
5593 #<immediate>
5594 #<immediate>, <rotate>
5595 <Rm>
5596 <Rm>, <shift>
5597
5598 where <shift> is defined by parse_shift above, and <rotate> is a
5599 multiple of 2 between 0 and 30. Validation of immediate operands
5600 is deferred to md_apply_fix. */
5601
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> optionally followed by ", <shift>".  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form.  */
  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30]; the base
	 immediate must fit in eight bits.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* No explicit rotation: leave encoding to md_apply_fix via this
     relocation.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5661
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation specifier as written in source.  */
  int alu_code;		/* BFD reloc for ADD/SUB, or 0 if unsupported.	*/
  int ldr_code;		/* BFD reloc for LDR, or 0 if unsupported.  */
  int ldrs_code;	/* BFD reloc for LDRS, or 0 if unsupported.  */
  int ldc_code;		/* BFD reloc for LDC, or 0 if unsupported.  */
};
5677
/* Selector for the non-ALU column of group_reloc_table used by a given
   instruction class.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5687
/* Table of recognised group-relocation specifiers.  A zero code means
   the specifier is not valid for that instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 },				/* LDC.	 */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.	 */
      0,				/* LDR.	 */
      0,				/* LDRS.  */
      0 } };				/* LDC.	 */
5762
5763 /* Given the address of a pointer pointing to the textual name of a group
5764 relocation as may appear in assembler source, attempt to find its details
5765 in group_reloc_table. The pointer will be updated to the character after
5766 the trailing colon. On failure, FAIL will be returned; SUCCESS
5767 otherwise. On success, *entry will be updated to point at the relevant
5768 group_reloc_table entry. */
5769
5770 static int
5771 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5772 {
5773 unsigned int i;
5774 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5775 {
5776 int length = strlen (group_reloc_table[i].name);
5777
5778 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5779 && (*str)[length] == ':')
5780 {
5781 *out = &group_reloc_table[i];
5782 *str += (length + 1);
5783 return SUCCESS;
5784 }
5785 }
5786
5787 return FAIL;
5788 }
5789
5790 /* Parse a <shifter_operand> for an ARM data processing instruction
5791 (as for parse_shifter_operand) where group relocations are allowed:
5792
5793 #<immediate>
5794 #<immediate>, <rotate>
5795 #:<group_reloc>:<expression>
5796 <Rm>
5797 <Rm>, <shift>
5798
5799 where <group_reloc> is one of the strings defined in group_reloc_table.
5800 The hashes are optional.
5801
5802 Everything else is as for parse_shifter_operand. */
5803
5804 static parse_operand_result
5805 parse_shifter_operand_group_reloc (char **str, int i)
5806 {
5807 /* Determine if we have the sequence of characters #: or just :
5808 coming next. If we do, then we check for a group relocation.
5809 If we don't, punt the whole lot to parse_shifter_operand. */
5810
5811 if (((*str)[0] == '#' && (*str)[1] == ':')
5812 || (*str)[0] == ':')
5813 {
5814 struct group_reloc_table_entry *entry;
5815
5816 if ((*str)[0] == '#')
5817 (*str) += 2;
5818 else
5819 (*str)++;
5820
5821 /* Try to parse a group relocation. Anything else is an error. */
5822 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5823 {
5824 inst.error = _("unknown group relocation");
5825 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5826 }
5827
5828 /* We now have the group relocation table entry corresponding to
5829 the name in the assembler source. Next, we parse the expression. */
5830 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5831 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5832
5833 /* Record the relocation type (always the ALU variant here). */
5834 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5835 gas_assert (inst.relocs[0].type != 0);
5836
5837 return PARSE_OPERAND_SUCCESS;
5838 }
5839 else
5840 return parse_shifter_operand (str, i) == SUCCESS
5841 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5842
5843 /* Never reached. */
5844 }
5845
5846 /* Parse a Neon alignment expression. Information is written to
5847 inst.operands[i]. We assume the initial ':' has been skipped.
5848
5849 align .imm = align << 8, .immisalign=1, .preind=0 */
5850 static parse_operand_result
5851 parse_neon_alignment (char **str, int i)
5852 {
5853 char *p = *str;
5854 expressionS exp;
5855
5856 my_get_expression (&exp, &p, GE_NO_PREFIX);
5857
5858 if (exp.X_op != O_constant)
5859 {
5860 inst.error = _("alignment must be constant");
5861 return PARSE_OPERAND_FAIL;
5862 }
5863
5864 inst.operands[i].imm = exp.X_add_number << 8;
5865 inst.operands[i].immisalign = 1;
5866 /* Alignments are not pre-indexes. */
5867 inst.operands[i].preind = 0;
5868
5869 *str = p;
5870 return PARSE_OPERAND_SUCCESS;
5871 }
5872
5873 /* Parse all forms of an ARM address expression. Information is written
5874 to inst.operands[i] and/or inst.relocs[0].
5875
5876 Preindexed addressing (.preind=1):
5877
5878 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5879 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5880 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5881 .shift_kind=shift .relocs[0].exp=shift_imm
5882
5883 These three may have a trailing ! which causes .writeback to be set also.
5884
5885 Postindexed addressing (.postind=1, .writeback=1):
5886
5887 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5888 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5889 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5890 .shift_kind=shift .relocs[0].exp=shift_imm
5891
5892 Unindexed addressing (.preind=0, .postind=0):
5893
5894 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5895
5896 Other:
5897
5898 [Rn]{!} shorthand for [Rn,#0]{!}
5899 =immediate .isreg=0 .relocs[0].exp=immediate
5900 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5901
5902 It is the caller's responsibility to check for addressing modes not
5903 supported by the instruction, and to set inst.relocs[0].type. */
5904
5905 static parse_operand_result
5906 parse_address_main (char **str, int i, int group_relocations,
5907 group_reloc_type group_type)
5908 {
5909 char *p = *str;
5910 int reg;
5911
5912 if (skip_past_char (&p, '[') == FAIL)
5913 {
5914 if (skip_past_char (&p, '=') == FAIL)
5915 {
5916 /* Bare address - translate to PC-relative offset. */
5917 inst.relocs[0].pc_rel = 1;
5918 inst.operands[i].reg = REG_PC;
5919 inst.operands[i].isreg = 1;
5920 inst.operands[i].preind = 1;
5921
5922 if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
5923 return PARSE_OPERAND_FAIL;
5924 }
5925 else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
5926 /*allow_symbol_p=*/TRUE))
5927 return PARSE_OPERAND_FAIL;
5928
5929 *str = p;
5930 return PARSE_OPERAND_SUCCESS;
5931 }
5932
5933 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5934 skip_whitespace (p);
5935
5936 if (group_type == GROUP_MVE)
5937 {
5938 enum arm_reg_type rtype = REG_TYPE_MQ;
5939 struct neon_type_el et;
5940 if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
5941 {
5942 inst.operands[i].isquad = 1;
5943 }
5944 else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5945 {
5946 inst.error = BAD_ADDR_MODE;
5947 return PARSE_OPERAND_FAIL;
5948 }
5949 }
5950 else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5951 {
5952 if (group_type == GROUP_MVE)
5953 inst.error = BAD_ADDR_MODE;
5954 else
5955 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5956 return PARSE_OPERAND_FAIL;
5957 }
5958 inst.operands[i].reg = reg;
5959 inst.operands[i].isreg = 1;
5960
5961 if (skip_past_comma (&p) == SUCCESS)
5962 {
5963 inst.operands[i].preind = 1;
5964
5965 if (*p == '+') p++;
5966 else if (*p == '-') p++, inst.operands[i].negative = 1;
5967
5968 enum arm_reg_type rtype = REG_TYPE_MQ;
5969 struct neon_type_el et;
5970 if (group_type == GROUP_MVE
5971 && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
5972 {
5973 inst.operands[i].immisreg = 2;
5974 inst.operands[i].imm = reg;
5975
5976 if (skip_past_comma (&p) == SUCCESS)
5977 {
5978 if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
5979 {
5980 inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
5981 inst.relocs[0].exp.X_add_number = 0;
5982 }
5983 else
5984 return PARSE_OPERAND_FAIL;
5985 }
5986 }
5987 else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5988 {
5989 inst.operands[i].imm = reg;
5990 inst.operands[i].immisreg = 1;
5991
5992 if (skip_past_comma (&p) == SUCCESS)
5993 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5994 return PARSE_OPERAND_FAIL;
5995 }
5996 else if (skip_past_char (&p, ':') == SUCCESS)
5997 {
5998 /* FIXME: '@' should be used here, but it's filtered out by generic
5999 code before we get to see it here. This may be subject to
6000 change. */
6001 parse_operand_result result = parse_neon_alignment (&p, i);
6002
6003 if (result != PARSE_OPERAND_SUCCESS)
6004 return result;
6005 }
6006 else
6007 {
6008 if (inst.operands[i].negative)
6009 {
6010 inst.operands[i].negative = 0;
6011 p--;
6012 }
6013
6014 if (group_relocations
6015 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
6016 {
6017 struct group_reloc_table_entry *entry;
6018
6019 /* Skip over the #: or : sequence. */
6020 if (*p == '#')
6021 p += 2;
6022 else
6023 p++;
6024
6025 /* Try to parse a group relocation. Anything else is an
6026 error. */
6027 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
6028 {
6029 inst.error = _("unknown group relocation");
6030 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6031 }
6032
6033 /* We now have the group relocation table entry corresponding to
6034 the name in the assembler source. Next, we parse the
6035 expression. */
6036 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6037 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6038
6039 /* Record the relocation type. */
6040 switch (group_type)
6041 {
6042 case GROUP_LDR:
6043 inst.relocs[0].type
6044 = (bfd_reloc_code_real_type) entry->ldr_code;
6045 break;
6046
6047 case GROUP_LDRS:
6048 inst.relocs[0].type
6049 = (bfd_reloc_code_real_type) entry->ldrs_code;
6050 break;
6051
6052 case GROUP_LDC:
6053 inst.relocs[0].type
6054 = (bfd_reloc_code_real_type) entry->ldc_code;
6055 break;
6056
6057 default:
6058 gas_assert (0);
6059 }
6060
6061 if (inst.relocs[0].type == 0)
6062 {
6063 inst.error = _("this group relocation is not allowed on this instruction");
6064 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6065 }
6066 }
6067 else
6068 {
6069 char *q = p;
6070
6071 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
6072 return PARSE_OPERAND_FAIL;
6073 /* If the offset is 0, find out if it's a +0 or -0. */
6074 if (inst.relocs[0].exp.X_op == O_constant
6075 && inst.relocs[0].exp.X_add_number == 0)
6076 {
6077 skip_whitespace (q);
6078 if (*q == '#')
6079 {
6080 q++;
6081 skip_whitespace (q);
6082 }
6083 if (*q == '-')
6084 inst.operands[i].negative = 1;
6085 }
6086 }
6087 }
6088 }
6089 else if (skip_past_char (&p, ':') == SUCCESS)
6090 {
6091 /* FIXME: '@' should be used here, but it's filtered out by generic code
6092 before we get to see it here. This may be subject to change. */
6093 parse_operand_result result = parse_neon_alignment (&p, i);
6094
6095 if (result != PARSE_OPERAND_SUCCESS)
6096 return result;
6097 }
6098
6099 if (skip_past_char (&p, ']') == FAIL)
6100 {
6101 inst.error = _("']' expected");
6102 return PARSE_OPERAND_FAIL;
6103 }
6104
6105 if (skip_past_char (&p, '!') == SUCCESS)
6106 inst.operands[i].writeback = 1;
6107
6108 else if (skip_past_comma (&p) == SUCCESS)
6109 {
6110 if (skip_past_char (&p, '{') == SUCCESS)
6111 {
6112 /* [Rn], {expr} - unindexed, with option */
6113 if (parse_immediate (&p, &inst.operands[i].imm,
6114 0, 255, TRUE) == FAIL)
6115 return PARSE_OPERAND_FAIL;
6116
6117 if (skip_past_char (&p, '}') == FAIL)
6118 {
6119 inst.error = _("'}' expected at end of 'option' field");
6120 return PARSE_OPERAND_FAIL;
6121 }
6122 if (inst.operands[i].preind)
6123 {
6124 inst.error = _("cannot combine index with option");
6125 return PARSE_OPERAND_FAIL;
6126 }
6127 *str = p;
6128 return PARSE_OPERAND_SUCCESS;
6129 }
6130 else
6131 {
6132 inst.operands[i].postind = 1;
6133 inst.operands[i].writeback = 1;
6134
6135 if (inst.operands[i].preind)
6136 {
6137 inst.error = _("cannot combine pre- and post-indexing");
6138 return PARSE_OPERAND_FAIL;
6139 }
6140
6141 if (*p == '+') p++;
6142 else if (*p == '-') p++, inst.operands[i].negative = 1;
6143
6144 enum arm_reg_type rtype = REG_TYPE_MQ;
6145 struct neon_type_el et;
6146 if (group_type == GROUP_MVE
6147 && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
6148 {
6149 inst.operands[i].immisreg = 2;
6150 inst.operands[i].imm = reg;
6151 }
6152 else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
6153 {
6154 /* We might be using the immediate for alignment already. If we
6155 are, OR the register number into the low-order bits. */
6156 if (inst.operands[i].immisalign)
6157 inst.operands[i].imm |= reg;
6158 else
6159 inst.operands[i].imm = reg;
6160 inst.operands[i].immisreg = 1;
6161
6162 if (skip_past_comma (&p) == SUCCESS)
6163 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
6164 return PARSE_OPERAND_FAIL;
6165 }
6166 else
6167 {
6168 char *q = p;
6169
6170 if (inst.operands[i].negative)
6171 {
6172 inst.operands[i].negative = 0;
6173 p--;
6174 }
6175 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
6176 return PARSE_OPERAND_FAIL;
6177 /* If the offset is 0, find out if it's a +0 or -0. */
6178 if (inst.relocs[0].exp.X_op == O_constant
6179 && inst.relocs[0].exp.X_add_number == 0)
6180 {
6181 skip_whitespace (q);
6182 if (*q == '#')
6183 {
6184 q++;
6185 skip_whitespace (q);
6186 }
6187 if (*q == '-')
6188 inst.operands[i].negative = 1;
6189 }
6190 }
6191 }
6192 }
6193
6194 /* If at this point neither .preind nor .postind is set, we have a
6195 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
6196 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
6197 {
6198 inst.operands[i].preind = 1;
6199 inst.relocs[0].exp.X_op = O_constant;
6200 inst.relocs[0].exp.X_add_number = 0;
6201 }
6202 *str = p;
6203 return PARSE_OPERAND_SUCCESS;
6204 }
6205
6206 static int
6207 parse_address (char **str, int i)
6208 {
6209 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6210 ? SUCCESS : FAIL;
6211 }
6212
/* As parse_address_main, but with group relocations enabled; TYPE selects
   which reloc-code column of group_reloc_table is used.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
6218
6219 /* Parse an operand for a MOVW or MOVT instruction. */
6220 static int
6221 parse_half (char **str)
6222 {
6223 char * p;
6224
6225 p = *str;
6226 skip_past_char (&p, '#');
6227 if (strncasecmp (p, ":lower16:", 9) == 0)
6228 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6229 else if (strncasecmp (p, ":upper16:", 9) == 0)
6230 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6231
6232 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6233 {
6234 p += 9;
6235 skip_whitespace (p);
6236 }
6237
6238 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6239 return FAIL;
6240
6241 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6242 {
6243 if (inst.relocs[0].exp.X_op != O_constant)
6244 {
6245 inst.error = _("constant expression expected");
6246 return FAIL;
6247 }
6248 if (inst.relocs[0].exp.X_add_number < 0
6249 || inst.relocs[0].exp.X_add_number > 0xffff)
6250 {
6251 inst.error = _("immediate value out of range");
6252 return FAIL;
6253 }
6254 }
6255 *str = p;
6256 return SUCCESS;
6257 }
6258
6259 /* Miscellaneous. */
6260
6261 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6262 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the M-profile registers whose names end in "psr", cut the
	 lookup name at the first 'r'/'R' so any "_<bits>" suffix is left
	 for the check_suffix handling below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* 0x20 (and 0x2 for 'g') is a "seen twice" marker: when a flag
	     letter repeats, its bit is already set, so the marker is set
	     instead and rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q must be present together.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters (marker bits) and partial nzcvq
	     sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Ordinary CPSR/SPSR field suffix: look the name up.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6457
6458 static int
6459 parse_sys_vldr_vstr (char **str)
6460 {
6461 unsigned i;
6462 int val = FAIL;
6463 struct {
6464 const char *name;
6465 int regl;
6466 int regh;
6467 } sysregs[] = {
6468 {"FPSCR", 0x1, 0x0},
6469 {"FPSCR_nzcvqc", 0x2, 0x0},
6470 {"VPR", 0x4, 0x1},
6471 {"P0", 0x5, 0x1},
6472 {"FPCXTNS", 0x6, 0x1},
6473 {"FPCXTS", 0x7, 0x1}
6474 };
6475 char *op_end = strchr (*str, ',');
6476 size_t op_strlen = op_end - *str;
6477
6478 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6479 {
6480 if (!strncmp (*str, sysregs[i].name, op_strlen))
6481 {
6482 val = sysregs[i].regl | (sysregs[i].regh << 3);
6483 *str = op_end;
6484 break;
6485 }
6486 }
6487
6488 return val;
6489 }
6490
6491 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6492 value suitable for splatting into the AIF field of the instruction. */
6493
6494 static int
6495 parse_cps_flags (char **str)
6496 {
6497 int val = 0;
6498 int saw_a_flag = 0;
6499 char *s = *str;
6500
6501 for (;;)
6502 switch (*s++)
6503 {
6504 case '\0': case ',':
6505 goto done;
6506
6507 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6508 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6509 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6510
6511 default:
6512 inst.error = _("unrecognized CPS flag");
6513 return FAIL;
6514 }
6515
6516 done:
6517 if (saw_a_flag == 0)
6518 {
6519 inst.error = _("missing CPS flags");
6520 return FAIL;
6521 }
6522
6523 *str = s - 1;
6524 return val;
6525 }
6526
6527 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6528 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6529
6530 static int
6531 parse_endian_specifier (char **str)
6532 {
6533 int little_endian;
6534 char *s = *str;
6535
6536 if (strncasecmp (s, "BE", 2))
6537 little_endian = 0;
6538 else if (strncasecmp (s, "LE", 2))
6539 little_endian = 1;
6540 else
6541 {
6542 inst.error = _("valid endian specifiers are be or le");
6543 return FAIL;
6544 }
6545
6546 if (ISALNUM (s[2]) || s[2] == '_')
6547 {
6548 inst.error = _("valid endian specifiers are be or le");
6549 return FAIL;
6550 }
6551
6552 *str = s + 2;
6553 return little_endian;
6554 }
6555
6556 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6557 value suitable for poking into the rotate field of an sxt or sxta
6558 instruction, or FAIL on error. */
6559
6560 static int
6561 parse_ror (char **str)
6562 {
6563 int rot;
6564 char *s = *str;
6565
6566 if (strncasecmp (s, "ROR", 3) == 0)
6567 s += 3;
6568 else
6569 {
6570 inst.error = _("missing rotation field after comma");
6571 return FAIL;
6572 }
6573
6574 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6575 return FAIL;
6576
6577 switch (rot)
6578 {
6579 case 0: *str = s; return 0x0;
6580 case 8: *str = s; return 0x1;
6581 case 16: *str = s; return 0x2;
6582 case 24: *str = s; return 0x3;
6583
6584 default:
6585 inst.error = _("rotation can only be 0, 8, 16, or 24");
6586 return FAIL;
6587 }
6588 }
6589
6590 /* Parse a conditional code (from conds[] below). The value returned is in the
6591 range 0 .. 14, or FAIL. */
6592 static int
6593 parse_cond (char **str)
6594 {
6595 char *q;
6596 const struct asm_cond *c;
6597 int n;
6598 /* Condition codes are always 2 characters, so matching up to
6599 3 characters is sufficient. */
6600 char cond[3];
6601
6602 q = *str;
6603 n = 0;
6604 while (ISALPHA (*q) && n < 3)
6605 {
6606 cond[n] = TOLOWER (*q);
6607 q++;
6608 n++;
6609 }
6610
6611 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6612 if (!c)
6613 {
6614 inst.error = _("condition required");
6615 return FAIL;
6616 }
6617
6618 *str = q;
6619 return c->value;
6620 }
6621
6622 /* Parse an option for a barrier instruction. Returns the encoding for the
6623 option, or FAIL. */
6624 static int
6625 parse_barrier (char **str)
6626 {
6627 char *p, *q;
6628 const struct asm_barrier_opt *o;
6629
6630 p = q = *str;
6631 while (ISALPHA (*q))
6632 q++;
6633
6634 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6635 q - p);
6636 if (!o)
6637 return FAIL;
6638
6639 if (!mark_feature_used (&o->arch))
6640 return FAIL;
6641
6642 *str = q;
6643 return o->value;
6644 }
6645
6646 /* Parse the operands of a table branch instruction. Similar to a memory
6647 operand. */
6648 static int
6649 parse_tb (char **str)
6650 {
6651 char * p = *str;
6652 int reg;
6653
6654 if (skip_past_char (&p, '[') == FAIL)
6655 {
6656 inst.error = _("'[' expected");
6657 return FAIL;
6658 }
6659
6660 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6661 {
6662 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6663 return FAIL;
6664 }
6665 inst.operands[0].reg = reg;
6666
6667 if (skip_past_comma (&p) == FAIL)
6668 {
6669 inst.error = _("',' expected");
6670 return FAIL;
6671 }
6672
6673 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6674 {
6675 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6676 return FAIL;
6677 }
6678 inst.operands[0].imm = reg;
6679
6680 if (skip_past_comma (&p) == SUCCESS)
6681 {
6682 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6683 return FAIL;
6684 if (inst.relocs[0].exp.X_add_number != 1)
6685 {
6686 inst.error = _("invalid shift");
6687 return FAIL;
6688 }
6689 inst.operands[0].shifted = 1;
6690 }
6691
6692 if (skip_past_char (&p, ']') == FAIL)
6693 {
6694 inst.error = _("']' expected");
6695 return FAIL;
6696 }
6697 *str = p;
6698 return SUCCESS;
6699 }
6700
6701 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6702 information on the types the operands can take and how they are encoded.
6703 Up to four operands may be read; this function handles setting the
6704 ".present" field for each read operand itself.
6705 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6706 else returns FAIL. */
6707
6708 static int
6709 parse_neon_mov (char **str, int *which_operand)
6710 {
6711 int i = *which_operand, val;
6712 enum arm_reg_type rtype;
6713 char *ptr = *str;
6714 struct neon_type_el optype;
6715
6716 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6717 {
6718 /* Cases 17 or 19. */
6719 inst.operands[i].reg = val;
6720 inst.operands[i].isvec = 1;
6721 inst.operands[i].isscalar = 2;
6722 inst.operands[i].vectype = optype;
6723 inst.operands[i++].present = 1;
6724
6725 if (skip_past_comma (&ptr) == FAIL)
6726 goto wanted_comma;
6727
6728 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6729 {
6730 /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt> */
6731 inst.operands[i].reg = val;
6732 inst.operands[i].isreg = 1;
6733 inst.operands[i].present = 1;
6734 }
6735 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6736 {
6737 /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2> */
6738 inst.operands[i].reg = val;
6739 inst.operands[i].isvec = 1;
6740 inst.operands[i].isscalar = 2;
6741 inst.operands[i].vectype = optype;
6742 inst.operands[i++].present = 1;
6743
6744 if (skip_past_comma (&ptr) == FAIL)
6745 goto wanted_comma;
6746
6747 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6748 goto wanted_arm;
6749
6750 inst.operands[i].reg = val;
6751 inst.operands[i].isreg = 1;
6752 inst.operands[i++].present = 1;
6753
6754 if (skip_past_comma (&ptr) == FAIL)
6755 goto wanted_comma;
6756
6757 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6758 goto wanted_arm;
6759
6760 inst.operands[i].reg = val;
6761 inst.operands[i].isreg = 1;
6762 inst.operands[i].present = 1;
6763 }
6764 else
6765 {
6766 first_error (_("expected ARM or MVE vector register"));
6767 return FAIL;
6768 }
6769 }
6770 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6771 {
6772 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6773 inst.operands[i].reg = val;
6774 inst.operands[i].isscalar = 1;
6775 inst.operands[i].vectype = optype;
6776 inst.operands[i++].present = 1;
6777
6778 if (skip_past_comma (&ptr) == FAIL)
6779 goto wanted_comma;
6780
6781 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6782 goto wanted_arm;
6783
6784 inst.operands[i].reg = val;
6785 inst.operands[i].isreg = 1;
6786 inst.operands[i].present = 1;
6787 }
6788 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6789 != FAIL)
6790 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
6791 != FAIL))
6792 {
6793 /* Cases 0, 1, 2, 3, 5 (D only). */
6794 if (skip_past_comma (&ptr) == FAIL)
6795 goto wanted_comma;
6796
6797 inst.operands[i].reg = val;
6798 inst.operands[i].isreg = 1;
6799 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6800 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6801 inst.operands[i].isvec = 1;
6802 inst.operands[i].vectype = optype;
6803 inst.operands[i++].present = 1;
6804
6805 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6806 {
6807 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6808 Case 13: VMOV <Sd>, <Rm> */
6809 inst.operands[i].reg = val;
6810 inst.operands[i].isreg = 1;
6811 inst.operands[i].present = 1;
6812
6813 if (rtype == REG_TYPE_NQ)
6814 {
6815 first_error (_("can't use Neon quad register here"));
6816 return FAIL;
6817 }
6818 else if (rtype != REG_TYPE_VFS)
6819 {
6820 i++;
6821 if (skip_past_comma (&ptr) == FAIL)
6822 goto wanted_comma;
6823 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6824 goto wanted_arm;
6825 inst.operands[i].reg = val;
6826 inst.operands[i].isreg = 1;
6827 inst.operands[i].present = 1;
6828 }
6829 }
6830 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6831 &optype)) != FAIL)
6832 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
6833 &optype)) != FAIL))
6834 {
6835 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6836 Case 1: VMOV<c><q> <Dd>, <Dm>
6837 Case 8: VMOV.F32 <Sd>, <Sm>
6838 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6839
6840 inst.operands[i].reg = val;
6841 inst.operands[i].isreg = 1;
6842 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6843 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6844 inst.operands[i].isvec = 1;
6845 inst.operands[i].vectype = optype;
6846 inst.operands[i].present = 1;
6847
6848 if (skip_past_comma (&ptr) == SUCCESS)
6849 {
6850 /* Case 15. */
6851 i++;
6852
6853 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6854 goto wanted_arm;
6855
6856 inst.operands[i].reg = val;
6857 inst.operands[i].isreg = 1;
6858 inst.operands[i++].present = 1;
6859
6860 if (skip_past_comma (&ptr) == FAIL)
6861 goto wanted_comma;
6862
6863 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6864 goto wanted_arm;
6865
6866 inst.operands[i].reg = val;
6867 inst.operands[i].isreg = 1;
6868 inst.operands[i].present = 1;
6869 }
6870 }
6871 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6872 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6873 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6874 Case 10: VMOV.F32 <Sd>, #<imm>
6875 Case 11: VMOV.F64 <Dd>, #<imm> */
6876 inst.operands[i].immisfloat = 1;
6877 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6878 == SUCCESS)
6879 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6880 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6881 ;
6882 else
6883 {
6884 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6885 return FAIL;
6886 }
6887 }
6888 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6889 {
6890 /* Cases 6, 7, 16, 18. */
6891 inst.operands[i].reg = val;
6892 inst.operands[i].isreg = 1;
6893 inst.operands[i++].present = 1;
6894
6895 if (skip_past_comma (&ptr) == FAIL)
6896 goto wanted_comma;
6897
6898 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6899 {
6900 /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]> */
6901 inst.operands[i].reg = val;
6902 inst.operands[i].isscalar = 2;
6903 inst.operands[i].present = 1;
6904 inst.operands[i].vectype = optype;
6905 }
6906 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6907 {
6908 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6909 inst.operands[i].reg = val;
6910 inst.operands[i].isscalar = 1;
6911 inst.operands[i].present = 1;
6912 inst.operands[i].vectype = optype;
6913 }
6914 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6915 {
6916 inst.operands[i].reg = val;
6917 inst.operands[i].isreg = 1;
6918 inst.operands[i++].present = 1;
6919
6920 if (skip_past_comma (&ptr) == FAIL)
6921 goto wanted_comma;
6922
6923 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6924 != FAIL)
6925 {
6926 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6927
6928 inst.operands[i].reg = val;
6929 inst.operands[i].isreg = 1;
6930 inst.operands[i].isvec = 1;
6931 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6932 inst.operands[i].vectype = optype;
6933 inst.operands[i].present = 1;
6934
6935 if (rtype == REG_TYPE_VFS)
6936 {
6937 /* Case 14. */
6938 i++;
6939 if (skip_past_comma (&ptr) == FAIL)
6940 goto wanted_comma;
6941 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6942 &optype)) == FAIL)
6943 {
6944 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6945 return FAIL;
6946 }
6947 inst.operands[i].reg = val;
6948 inst.operands[i].isreg = 1;
6949 inst.operands[i].isvec = 1;
6950 inst.operands[i].issingle = 1;
6951 inst.operands[i].vectype = optype;
6952 inst.operands[i].present = 1;
6953 }
6954 }
6955 else
6956 {
6957 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
6958 != FAIL)
6959 {
6960 /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> */
6961 inst.operands[i].reg = val;
6962 inst.operands[i].isvec = 1;
6963 inst.operands[i].isscalar = 2;
6964 inst.operands[i].vectype = optype;
6965 inst.operands[i++].present = 1;
6966
6967 if (skip_past_comma (&ptr) == FAIL)
6968 goto wanted_comma;
6969
6970 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
6971 == FAIL)
6972 {
6973 first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
6974 return FAIL;
6975 }
6976 inst.operands[i].reg = val;
6977 inst.operands[i].isvec = 1;
6978 inst.operands[i].isscalar = 2;
6979 inst.operands[i].vectype = optype;
6980 inst.operands[i].present = 1;
6981 }
6982 else
6983 {
6984 first_error (_("VFP single, double or MVE vector register"
6985 " expected"));
6986 return FAIL;
6987 }
6988 }
6989 }
6990 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6991 != FAIL)
6992 {
6993 /* Case 13. */
6994 inst.operands[i].reg = val;
6995 inst.operands[i].isreg = 1;
6996 inst.operands[i].isvec = 1;
6997 inst.operands[i].issingle = 1;
6998 inst.operands[i].vectype = optype;
6999 inst.operands[i].present = 1;
7000 }
7001 }
7002 else
7003 {
7004 first_error (_("parse error"));
7005 return FAIL;
7006 }
7007
7008 /* Successfully parsed the operands. Update args. */
7009 *which_operand = i;
7010 *str = ptr;
7011 return SUCCESS;
7012
7013 wanted_comma:
7014 first_error (_("expected comma"));
7015 return FAIL;
7016
7017 wanted_arm:
7018 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
7019 return FAIL;
7020 }
7021
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM operand code goes in the low
   16 bits and the Thumb operand code in the high 16 bits; parse_operands
   selects the appropriate half based on the instruction state.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
7026
/* Matcher codes for parse_operands.

   NOTE: the *order* of enumerators is significant.  Optional operand
   codes (the OP_o* family) must be contiguous at the end of the list:
   parse_operands treats any code >= OP_FIRST_OPTIONAL as optional and
   uses that threshold to decide when backtracking is permitted.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNDMQ,	/* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,	/* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNDQMQ,	/* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,	/* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/PC) */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,	/* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC,	/* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/* 0 .. 15 */
  OP_I16,	/* 1 .. 16 */
  OP_I16z,	/* 0 .. 16 */
  OP_I31,	/* 0 .. 31 */
  OP_I31w,	/* 0 .. 31, optional trailing ! */
  OP_I32,	/* 1 .. 32 */
  OP_I32z,	/* 0 .. 32 */
  OP_I48_I64,	/* 48 or 64 */
  OP_I63,	/* 0 .. 63 */
  OP_I63s,	/* -64 .. 63 */
  OP_I64,	/* 1 .. 64 */
  OP_I64z,	/* 0 .. 64 */
  OP_I255,	/* 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/* 0 .. 7 */
  OP_I15b,	/* 0 .. 15 */
  OP_I31b,	/* 0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  Everything from here on (up to the mixed
     ARM/THUMB codes) must stay at or after OP_FIRST_OPTIONAL.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/* 0 .. 31 */
  OP_oI32b,	/* 1 .. 32 */
  OP_oI32z,	/* 0 .. 32 */
  OP_oIffffb,	/* 0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oLR,	/* ARM LR register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQMQ,	/* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	/* Optional single, double or quad register or MVE vector
		   register.  */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Threshold used by parse_operands to detect optional operands;
     it must equal the first OP_o* enumerator above.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7212
7213 /* Generic instruction operand parser. This does no encoding and no
7214 semantic validation; it merely squirrels values away in the inst
7215 structure. Returns SUCCESS or FAIL depending on whether the
7216 specified grammar matched. */
7217 static int
7218 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
7219 {
7220 unsigned const int *upat = pattern;
7221 char *backtrack_pos = 0;
7222 const char *backtrack_error = 0;
7223 int i, val = 0, backtrack_index = 0;
7224 enum arm_reg_type rtype;
7225 parse_operand_result result;
7226 unsigned int op_parse_code;
7227 bfd_boolean partial_match;
7228
7229 #define po_char_or_fail(chr) \
7230 do \
7231 { \
7232 if (skip_past_char (&str, chr) == FAIL) \
7233 goto bad_args; \
7234 } \
7235 while (0)
7236
7237 #define po_reg_or_fail(regtype) \
7238 do \
7239 { \
7240 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7241 & inst.operands[i].vectype); \
7242 if (val == FAIL) \
7243 { \
7244 first_error (_(reg_expected_msgs[regtype])); \
7245 goto failure; \
7246 } \
7247 inst.operands[i].reg = val; \
7248 inst.operands[i].isreg = 1; \
7249 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7250 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7251 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7252 || rtype == REG_TYPE_VFD \
7253 || rtype == REG_TYPE_NQ); \
7254 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7255 } \
7256 while (0)
7257
7258 #define po_reg_or_goto(regtype, label) \
7259 do \
7260 { \
7261 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7262 & inst.operands[i].vectype); \
7263 if (val == FAIL) \
7264 goto label; \
7265 \
7266 inst.operands[i].reg = val; \
7267 inst.operands[i].isreg = 1; \
7268 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7269 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7270 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7271 || rtype == REG_TYPE_VFD \
7272 || rtype == REG_TYPE_NQ); \
7273 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7274 } \
7275 while (0)
7276
7277 #define po_imm_or_fail(min, max, popt) \
7278 do \
7279 { \
7280 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
7281 goto failure; \
7282 inst.operands[i].imm = val; \
7283 } \
7284 while (0)
7285
7286 #define po_imm1_or_imm2_or_fail(imm1, imm2, popt) \
7287 do \
7288 { \
7289 expressionS exp; \
7290 my_get_expression (&exp, &str, popt); \
7291 if (exp.X_op != O_constant) \
7292 { \
7293 inst.error = _("constant expression required"); \
7294 goto failure; \
7295 } \
7296 if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7297 { \
7298 inst.error = _("immediate value 48 or 64 expected"); \
7299 goto failure; \
7300 } \
7301 inst.operands[i].imm = exp.X_add_number; \
7302 } \
7303 while (0)
7304
7305 #define po_scalar_or_goto(elsz, label, reg_type) \
7306 do \
7307 { \
7308 val = parse_scalar (& str, elsz, & inst.operands[i].vectype, \
7309 reg_type); \
7310 if (val == FAIL) \
7311 goto label; \
7312 inst.operands[i].reg = val; \
7313 inst.operands[i].isscalar = 1; \
7314 } \
7315 while (0)
7316
7317 #define po_misc_or_fail(expr) \
7318 do \
7319 { \
7320 if (expr) \
7321 goto failure; \
7322 } \
7323 while (0)
7324
7325 #define po_misc_or_fail_no_backtrack(expr) \
7326 do \
7327 { \
7328 result = expr; \
7329 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7330 backtrack_pos = 0; \
7331 if (result != PARSE_OPERAND_SUCCESS) \
7332 goto failure; \
7333 } \
7334 while (0)
7335
7336 #define po_barrier_or_imm(str) \
7337 do \
7338 { \
7339 val = parse_barrier (&str); \
7340 if (val == FAIL && ! ISALPHA (*str)) \
7341 goto immediate; \
7342 if (val == FAIL \
7343 /* ISB can only take SY as an option. */ \
7344 || ((inst.instruction & 0xf0) == 0x60 \
7345 && val != 0xf)) \
7346 { \
7347 inst.error = _("invalid barrier type"); \
7348 backtrack_pos = 0; \
7349 goto failure; \
7350 } \
7351 } \
7352 while (0)
7353
7354 skip_whitespace (str);
7355
7356 for (i = 0; upat[i] != OP_stop; i++)
7357 {
7358 op_parse_code = upat[i];
7359 if (op_parse_code >= 1<<16)
7360 op_parse_code = thumb ? (op_parse_code >> 16)
7361 : (op_parse_code & ((1<<16)-1));
7362
7363 if (op_parse_code >= OP_FIRST_OPTIONAL)
7364 {
7365 /* Remember where we are in case we need to backtrack. */
7366 backtrack_pos = str;
7367 backtrack_error = inst.error;
7368 backtrack_index = i;
7369 }
7370
7371 if (i > 0 && (i > 1 || inst.operands[0].present))
7372 po_char_or_fail (',');
7373
7374 switch (op_parse_code)
7375 {
7376 /* Registers */
7377 case OP_oRRnpc:
7378 case OP_oRRnpcsp:
7379 case OP_RRnpc:
7380 case OP_RRnpcsp:
7381 case OP_oRR:
7382 case OP_RRe:
7383 case OP_RRo:
7384 case OP_LR:
7385 case OP_oLR:
7386 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
7387 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
7388 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
7389 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
7390 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
7391 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
7392 case OP_oRND:
7393 case OP_RNDMQR:
7394 po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7395 break;
7396 try_rndmq:
7397 case OP_RNDMQ:
7398 po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7399 break;
7400 try_rnd:
7401 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
7402 case OP_RVC:
7403 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7404 break;
7405 /* Also accept generic coprocessor regs for unknown registers. */
7406 coproc_reg:
7407 po_reg_or_goto (REG_TYPE_CN, vpr_po);
7408 break;
7409 /* Also accept P0 or p0 for VPR.P0. Since P0 is already an
7410 existing register with a value of 0, this seems like the
7411 best way to parse P0. */
7412 vpr_po:
7413 if (strncasecmp (str, "P0", 2) == 0)
7414 {
7415 str += 2;
7416 inst.operands[i].isreg = 1;
7417 inst.operands[i].reg = 13;
7418 }
7419 else
7420 goto failure;
7421 break;
7422 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
7423 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
7424 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
7425 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
7426 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
7427 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
7428 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
7429 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
7430 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
7431 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
7432 case OP_oRNQ:
7433 case OP_RNQMQ:
7434 po_reg_or_goto (REG_TYPE_MQ, try_nq);
7435 break;
7436 try_nq:
7437 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
7438 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
7439 case OP_RNDQMQR:
7440 po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7441 break;
7442 try_rndqmq:
7443 case OP_oRNDQMQ:
7444 case OP_RNDQMQ:
7445 po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7446 break;
7447 try_rndq:
7448 case OP_oRNDQ:
7449 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
7450 case OP_RVSDMQ:
7451 po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7452 break;
7453 try_rvsd:
7454 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
7455 case OP_RVSD_COND:
7456 po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7457 break;
7458 case OP_oRNSDQ:
7459 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
7460 case OP_RNSDQMQR:
7461 po_reg_or_goto (REG_TYPE_RN, try_mq);
7462 break;
7463 try_mq:
7464 case OP_oRNSDQMQ:
7465 case OP_RNSDQMQ:
7466 po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7467 break;
7468 try_nsdq2:
7469 po_reg_or_fail (REG_TYPE_NSDQ);
7470 inst.error = 0;
7471 break;
7472 case OP_RMQRR:
7473 po_reg_or_goto (REG_TYPE_RN, try_rmq);
7474 break;
7475 try_rmq:
7476 case OP_RMQ:
7477 po_reg_or_fail (REG_TYPE_MQ);
7478 break;
7479 /* Neon scalar. Using an element size of 8 means that some invalid
7480 scalars are accepted here, so deal with those in later code. */
7481 case OP_RNSC: po_scalar_or_goto (8, failure, REG_TYPE_VFD); break;
7482
7483 case OP_RNDQ_I0:
7484 {
7485 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7486 break;
7487 try_imm0:
7488 po_imm_or_fail (0, 0, TRUE);
7489 }
7490 break;
7491
7492 case OP_RVSD_I0:
7493 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7494 break;
7495
7496 case OP_RSVDMQ_FI0:
7497 po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7498 break;
7499 try_rsvd_fi0:
7500 case OP_RSVD_FI0:
7501 {
7502 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7503 break;
7504 try_ifimm0:
7505 if (parse_ifimm_zero (&str))
7506 inst.operands[i].imm = 0;
7507 else
7508 {
7509 inst.error
7510 = _("only floating point zero is allowed as immediate value");
7511 goto failure;
7512 }
7513 }
7514 break;
7515
7516 case OP_RR_RNSC:
7517 {
7518 po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7519 break;
7520 try_rr:
7521 po_reg_or_fail (REG_TYPE_RN);
7522 }
7523 break;
7524
7525 case OP_RNSDQ_RNSC_MQ_RR:
7526 po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7527 break;
7528 try_rnsdq_rnsc_mq:
7529 case OP_RNSDQ_RNSC_MQ:
7530 po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7531 break;
7532 try_rnsdq_rnsc:
7533 case OP_RNSDQ_RNSC:
7534 {
7535 po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7536 inst.error = 0;
7537 break;
7538 try_nsdq:
7539 po_reg_or_fail (REG_TYPE_NSDQ);
7540 inst.error = 0;
7541 }
7542 break;
7543
7544 case OP_RNSD_RNSC:
7545 {
7546 po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7547 break;
7548 try_s_scalar:
7549 po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7550 break;
7551 try_nsd:
7552 po_reg_or_fail (REG_TYPE_NSD);
7553 }
7554 break;
7555
7556 case OP_RNDQMQ_RNSC_RR:
7557 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7558 break;
7559 try_rndq_rnsc_rr:
7560 case OP_RNDQ_RNSC_RR:
7561 po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7562 break;
7563 case OP_RNDQMQ_RNSC:
7564 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7565 break;
7566 try_rndq_rnsc:
7567 case OP_RNDQ_RNSC:
7568 {
7569 po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7570 break;
7571 try_ndq:
7572 po_reg_or_fail (REG_TYPE_NDQ);
7573 }
7574 break;
7575
7576 case OP_RND_RNSC:
7577 {
7578 po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7579 break;
7580 try_vfd:
7581 po_reg_or_fail (REG_TYPE_VFD);
7582 }
7583 break;
7584
7585 case OP_VMOV:
7586 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7587 not careful then bad things might happen. */
7588 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7589 break;
7590
7591 case OP_RNDQMQ_Ibig:
7592 po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7593 break;
7594 try_rndq_ibig:
7595 case OP_RNDQ_Ibig:
7596 {
7597 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7598 break;
7599 try_immbig:
7600 /* There's a possibility of getting a 64-bit immediate here, so
7601 we need special handling. */
7602 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
7603 == FAIL)
7604 {
7605 inst.error = _("immediate value is out of range");
7606 goto failure;
7607 }
7608 }
7609 break;
7610
7611 case OP_RNDQMQ_I63b_RR:
7612 po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7613 break;
7614 try_rndq_i63b_rr:
7615 po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7616 break;
7617 try_rndq_i63b:
7618 case OP_RNDQ_I63b:
7619 {
7620 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7621 break;
7622 try_shimm:
7623 po_imm_or_fail (0, 63, TRUE);
7624 }
7625 break;
7626
7627 case OP_RRnpcb:
7628 po_char_or_fail ('[');
7629 po_reg_or_fail (REG_TYPE_RN);
7630 po_char_or_fail (']');
7631 break;
7632
7633 case OP_RRnpctw:
7634 case OP_RRw:
7635 case OP_oRRw:
7636 po_reg_or_fail (REG_TYPE_RN);
7637 if (skip_past_char (&str, '!') == SUCCESS)
7638 inst.operands[i].writeback = 1;
7639 break;
7640
7641 /* Immediates */
7642 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
7643 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
7644 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
7645 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
7646 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
7647 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
7648 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
7649 case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
7650 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
7651 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
7652 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
7653 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
7654 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
7655
7656 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
7657 case OP_oI7b:
7658 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
7659 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
7660 case OP_oI31b:
7661 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
7662 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
7663 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
7664 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
7665
7666 /* Immediate variants */
7667 case OP_oI255c:
7668 po_char_or_fail ('{');
7669 po_imm_or_fail (0, 255, TRUE);
7670 po_char_or_fail ('}');
7671 break;
7672
7673 case OP_I31w:
7674 /* The expression parser chokes on a trailing !, so we have
7675 to find it first and zap it. */
7676 {
7677 char *s = str;
7678 while (*s && *s != ',')
7679 s++;
7680 if (s[-1] == '!')
7681 {
7682 s[-1] = '\0';
7683 inst.operands[i].writeback = 1;
7684 }
7685 po_imm_or_fail (0, 31, TRUE);
7686 if (str == s - 1)
7687 str = s;
7688 }
7689 break;
7690
7691 /* Expressions */
7692 case OP_EXPi: EXPi:
7693 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7694 GE_OPT_PREFIX));
7695 break;
7696
7697 case OP_EXP:
7698 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7699 GE_NO_PREFIX));
7700 break;
7701
7702 case OP_EXPr: EXPr:
7703 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7704 GE_NO_PREFIX));
7705 if (inst.relocs[0].exp.X_op == O_symbol)
7706 {
7707 val = parse_reloc (&str);
7708 if (val == -1)
7709 {
7710 inst.error = _("unrecognized relocation suffix");
7711 goto failure;
7712 }
7713 else if (val != BFD_RELOC_UNUSED)
7714 {
7715 inst.operands[i].imm = val;
7716 inst.operands[i].hasreloc = 1;
7717 }
7718 }
7719 break;
7720
7721 case OP_EXPs:
7722 po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7723 GE_NO_PREFIX));
7724 if (inst.relocs[i].exp.X_op == O_symbol)
7725 {
7726 inst.operands[i].hasreloc = 1;
7727 }
7728 else if (inst.relocs[i].exp.X_op == O_constant)
7729 {
7730 inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7731 inst.operands[i].hasreloc = 0;
7732 }
7733 break;
7734
7735 /* Operand for MOVW or MOVT. */
7736 case OP_HALF:
7737 po_misc_or_fail (parse_half (&str));
7738 break;
7739
7740 /* Register or expression. */
7741 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7742 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7743
7744 /* Register or immediate. */
7745 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7746 I0: po_imm_or_fail (0, 0, FALSE); break;
7747
7748 case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
7749 I32: po_imm_or_fail (1, 32, FALSE); break;
7750
7751 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7752 IF:
7753 if (!is_immediate_prefix (*str))
7754 goto bad_args;
7755 str++;
7756 val = parse_fpa_immediate (&str);
7757 if (val == FAIL)
7758 goto failure;
7759 /* FPA immediates are encoded as registers 8-15.
7760 parse_fpa_immediate has already applied the offset. */
7761 inst.operands[i].reg = val;
7762 inst.operands[i].isreg = 1;
7763 break;
7764
7765 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7766 I32z: po_imm_or_fail (0, 32, FALSE); break;
7767
7768 /* Two kinds of register. */
7769 case OP_RIWR_RIWC:
7770 {
7771 struct reg_entry *rege = arm_reg_parse_multi (&str);
7772 if (!rege
7773 || (rege->type != REG_TYPE_MMXWR
7774 && rege->type != REG_TYPE_MMXWC
7775 && rege->type != REG_TYPE_MMXWCG))
7776 {
7777 inst.error = _("iWMMXt data or control register expected");
7778 goto failure;
7779 }
7780 inst.operands[i].reg = rege->number;
7781 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7782 }
7783 break;
7784
7785 case OP_RIWC_RIWG:
7786 {
7787 struct reg_entry *rege = arm_reg_parse_multi (&str);
7788 if (!rege
7789 || (rege->type != REG_TYPE_MMXWC
7790 && rege->type != REG_TYPE_MMXWCG))
7791 {
7792 inst.error = _("iWMMXt control register expected");
7793 goto failure;
7794 }
7795 inst.operands[i].reg = rege->number;
7796 inst.operands[i].isreg = 1;
7797 }
7798 break;
7799
7800 /* Misc */
7801 case OP_CPSF: val = parse_cps_flags (&str); break;
7802 case OP_ENDI: val = parse_endian_specifier (&str); break;
7803 case OP_oROR: val = parse_ror (&str); break;
7804 try_cond:
7805 case OP_COND: val = parse_cond (&str); break;
7806 case OP_oBARRIER_I15:
7807 po_barrier_or_imm (str); break;
7808 immediate:
7809 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7810 goto failure;
7811 break;
7812
7813 case OP_wPSR:
7814 case OP_rPSR:
7815 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7816 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7817 {
7818 inst.error = _("Banked registers are not available with this "
7819 "architecture.");
7820 goto failure;
7821 }
7822 break;
7823 try_psr:
7824 val = parse_psr (&str, op_parse_code == OP_wPSR);
7825 break;
7826
7827 case OP_VLDR:
7828 po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7829 break;
7830 try_sysreg:
7831 val = parse_sys_vldr_vstr (&str);
7832 break;
7833
7834 case OP_APSR_RR:
7835 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7836 break;
7837 try_apsr:
7838 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7839 instruction). */
7840 if (strncasecmp (str, "APSR_", 5) == 0)
7841 {
7842 unsigned found = 0;
7843 str += 5;
7844 while (found < 15)
7845 switch (*str++)
7846 {
7847 case 'c': found = (found & 1) ? 16 : found | 1; break;
7848 case 'n': found = (found & 2) ? 16 : found | 2; break;
7849 case 'z': found = (found & 4) ? 16 : found | 4; break;
7850 case 'v': found = (found & 8) ? 16 : found | 8; break;
7851 default: found = 16;
7852 }
7853 if (found != 15)
7854 goto failure;
7855 inst.operands[i].isvec = 1;
7856 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7857 inst.operands[i].reg = REG_PC;
7858 }
7859 else
7860 goto failure;
7861 break;
7862
7863 case OP_TB:
7864 po_misc_or_fail (parse_tb (&str));
7865 break;
7866
7867 /* Register lists. */
7868 case OP_REGLST:
7869 val = parse_reg_list (&str, REGLIST_RN);
7870 if (*str == '^')
7871 {
7872 inst.operands[i].writeback = 1;
7873 str++;
7874 }
7875 break;
7876
7877 case OP_CLRMLST:
7878 val = parse_reg_list (&str, REGLIST_CLRM);
7879 break;
7880
7881 case OP_VRSLST:
7882 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7883 &partial_match);
7884 break;
7885
7886 case OP_VRDLST:
7887 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7888 &partial_match);
7889 break;
7890
7891 case OP_VRSDLST:
7892 /* Allow Q registers too. */
7893 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7894 REGLIST_NEON_D, &partial_match);
7895 if (val == FAIL)
7896 {
7897 inst.error = NULL;
7898 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7899 REGLIST_VFP_S, &partial_match);
7900 inst.operands[i].issingle = 1;
7901 }
7902 break;
7903
7904 case OP_VRSDVLST:
7905 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7906 REGLIST_VFP_D_VPR, &partial_match);
7907 if (val == FAIL && !partial_match)
7908 {
7909 inst.error = NULL;
7910 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7911 REGLIST_VFP_S_VPR, &partial_match);
7912 inst.operands[i].issingle = 1;
7913 }
7914 break;
7915
7916 case OP_NRDLST:
7917 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7918 REGLIST_NEON_D, &partial_match);
7919 break;
7920
7921 case OP_MSTRLST4:
7922 case OP_MSTRLST2:
7923 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7924 1, &inst.operands[i].vectype);
7925 if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7926 goto failure;
7927 break;
7928 case OP_NSTRLST:
7929 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7930 0, &inst.operands[i].vectype);
7931 break;
7932
7933 /* Addressing modes */
7934 case OP_ADDRMVE:
7935 po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7936 break;
7937
7938 case OP_ADDR:
7939 po_misc_or_fail (parse_address (&str, i));
7940 break;
7941
7942 case OP_ADDRGLDR:
7943 po_misc_or_fail_no_backtrack (
7944 parse_address_group_reloc (&str, i, GROUP_LDR));
7945 break;
7946
7947 case OP_ADDRGLDRS:
7948 po_misc_or_fail_no_backtrack (
7949 parse_address_group_reloc (&str, i, GROUP_LDRS));
7950 break;
7951
7952 case OP_ADDRGLDC:
7953 po_misc_or_fail_no_backtrack (
7954 parse_address_group_reloc (&str, i, GROUP_LDC));
7955 break;
7956
7957 case OP_SH:
7958 po_misc_or_fail (parse_shifter_operand (&str, i));
7959 break;
7960
7961 case OP_SHG:
7962 po_misc_or_fail_no_backtrack (
7963 parse_shifter_operand_group_reloc (&str, i));
7964 break;
7965
7966 case OP_oSHll:
7967 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7968 break;
7969
7970 case OP_oSHar:
7971 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7972 break;
7973
7974 case OP_oSHllar:
7975 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7976 break;
7977
7978 case OP_RMQRZ:
7979 case OP_oRMQRZ:
7980 po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
7981 break;
7982
7983 case OP_RR_ZR:
7984 try_rr_zr:
7985 po_reg_or_goto (REG_TYPE_RN, ZR);
7986 break;
7987 ZR:
7988 po_reg_or_fail (REG_TYPE_ZR);
7989 break;
7990
7991 default:
7992 as_fatal (_("unhandled operand code %d"), op_parse_code);
7993 }
7994
7995 /* Various value-based sanity checks and shared operations. We
7996 do not signal immediate failures for the register constraints;
7997 this allows a syntax error to take precedence. */
7998 switch (op_parse_code)
7999 {
8000 case OP_oRRnpc:
8001 case OP_RRnpc:
8002 case OP_RRnpcb:
8003 case OP_RRw:
8004 case OP_oRRw:
8005 case OP_RRnpc_I0:
8006 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8007 inst.error = BAD_PC;
8008 break;
8009
8010 case OP_oRRnpcsp:
8011 case OP_RRnpcsp:
8012 case OP_RRnpcsp_I32:
8013 if (inst.operands[i].isreg)
8014 {
8015 if (inst.operands[i].reg == REG_PC)
8016 inst.error = BAD_PC;
8017 else if (inst.operands[i].reg == REG_SP
8018 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8019 relaxed since ARMv8-A. */
8020 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8021 {
8022 gas_assert (thumb);
8023 inst.error = BAD_SP;
8024 }
8025 }
8026 break;
8027
8028 case OP_RRnpctw:
8029 if (inst.operands[i].isreg
8030 && inst.operands[i].reg == REG_PC
8031 && (inst.operands[i].writeback || thumb))
8032 inst.error = BAD_PC;
8033 break;
8034
8035 case OP_RVSD_COND:
8036 case OP_VLDR:
8037 if (inst.operands[i].isreg)
8038 break;
8039 /* fall through. */
8040
8041 case OP_CPSF:
8042 case OP_ENDI:
8043 case OP_oROR:
8044 case OP_wPSR:
8045 case OP_rPSR:
8046 case OP_COND:
8047 case OP_oBARRIER_I15:
8048 case OP_REGLST:
8049 case OP_CLRMLST:
8050 case OP_VRSLST:
8051 case OP_VRDLST:
8052 case OP_VRSDLST:
8053 case OP_VRSDVLST:
8054 case OP_NRDLST:
8055 case OP_NSTRLST:
8056 case OP_MSTRLST2:
8057 case OP_MSTRLST4:
8058 if (val == FAIL)
8059 goto failure;
8060 inst.operands[i].imm = val;
8061 break;
8062
8063 case OP_LR:
8064 case OP_oLR:
8065 if (inst.operands[i].reg != REG_LR)
8066 inst.error = _("operand must be LR register");
8067 break;
8068
8069 case OP_RMQRZ:
8070 case OP_oRMQRZ:
8071 case OP_RR_ZR:
8072 if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8073 inst.error = BAD_PC;
8074 break;
8075
8076 case OP_RRe:
8077 if (inst.operands[i].isreg
8078 && (inst.operands[i].reg & 0x00000001) != 0)
8079 inst.error = BAD_ODD;
8080 break;
8081
8082 case OP_RRo:
8083 if (inst.operands[i].isreg)
8084 {
8085 if ((inst.operands[i].reg & 0x00000001) != 1)
8086 inst.error = BAD_EVEN;
8087 else if (inst.operands[i].reg == REG_SP)
8088 as_tsktsk (MVE_BAD_SP);
8089 else if (inst.operands[i].reg == REG_PC)
8090 inst.error = BAD_PC;
8091 }
8092 break;
8093
8094 default:
8095 break;
8096 }
8097
8098 /* If we get here, this operand was successfully parsed. */
8099 inst.operands[i].present = 1;
8100 continue;
8101
8102 bad_args:
8103 inst.error = BAD_ARGS;
8104
8105 failure:
8106 if (!backtrack_pos)
8107 {
8108 /* The parse routine should already have set inst.error, but set a
8109 default here just in case. */
8110 if (!inst.error)
8111 inst.error = BAD_SYNTAX;
8112 return FAIL;
8113 }
8114
8115 /* Do not backtrack over a trailing optional argument that
8116 absorbed some text. We will only fail again, with the
8117 'garbage following instruction' error message, which is
8118 probably less helpful than the current one. */
8119 if (backtrack_index == i && backtrack_pos != str
8120 && upat[i+1] == OP_stop)
8121 {
8122 if (!inst.error)
8123 inst.error = BAD_SYNTAX;
8124 return FAIL;
8125 }
8126
8127 /* Try again, skipping the optional argument at backtrack_pos. */
8128 str = backtrack_pos;
8129 inst.error = backtrack_error;
8130 inst.operands[backtrack_index].present = 0;
8131 i = backtrack_index;
8132 backtrack_pos = 0;
8133 }
8134
8135 /* Check that we have parsed all the arguments. */
8136 if (*str != '\0' && !inst.error)
8137 inst.error = _("garbage following instruction");
8138
8139 return inst.error ? FAIL : SUCCESS;
8140 }
8141
/* The po_* parsing helpers are local to parse_operands; retire them
   now that the function is complete.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
#undef po_barrier_or_imm
8148
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   calling (void) encode function.  The do/while (0) wrapper makes the
   macro usable as a single statement.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8160
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like `constraint', this records a diagnostic in inst.error and
   returns from the calling encode function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
8181
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Non-fatal; only emitted when deprecated-feature
   warnings are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8189
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits.  Masking both shift
   counts with 31 keeps them in range when N is 0 (or 32), avoiding a
   shift by the full type width, which would be undefined behaviour.
   Note V and N are evaluated more than once, so arguments must be
   free of side effects.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
8193
8194 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8195
8196 The only binary encoding difference is the Coprocessor number. Coprocessor
8197 9 is used for half-precision calculations or conversions. The format of the
8198 instruction is the same as the equivalent Coprocessor 10 instruction that
8199 exists for Single-Precision operation. */
8200
8201 static void
8202 do_scalar_fp16_v82_encode (void)
8203 {
8204 if (inst.cond < COND_ALWAYS)
8205 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8206 " the behaviour is UNPREDICTABLE"));
8207 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
8208 _(BAD_FP16));
8209
8210 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
8211 mark_feature_used (&arm_ext_fp16);
8212 }
8213
8214 /* If VAL can be encoded in the immediate field of an ARM instruction,
8215 return the encoded form. Otherwise, return FAIL. */
8216
8217 static unsigned int
8218 encode_arm_immediate (unsigned int val)
8219 {
8220 unsigned int a, i;
8221
8222 if (val <= 0xff)
8223 return val;
8224
8225 for (i = 2; i < 32; i += 2)
8226 if ((a = rotate_left (val, i)) <= 0xff)
8227 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
8228
8229 return FAIL;
8230 }
8231
8232 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8233 return the encoded form. Otherwise, return FAIL. */
8234 static unsigned int
8235 encode_thumb32_immediate (unsigned int val)
8236 {
8237 unsigned int a, i;
8238
8239 if (val <= 0xff)
8240 return val;
8241
8242 for (i = 1; i <= 24; i++)
8243 {
8244 a = val >> i;
8245 if ((val & ~(0xff << i)) == 0)
8246 return ((val >> i) & 0x7f) | ((32 - i) << 7);
8247 }
8248
8249 a = val & 0xff;
8250 if (val == ((a << 16) | a))
8251 return 0x100 | a;
8252 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8253 return 0x300 | a;
8254
8255 a = val & 0xff00;
8256 if (val == ((a << 16) | a))
8257 return 0x200 | (a >> 8);
8258
8259 return FAIL;
8260 }
/* Encode a VFP SP or DP register number REG into inst.instruction.
   POS selects which operand field (Sd/Sn/Sm or Dd/Dn/Dm) receives the
   register.  D registers 16-31 need the D32 extension; using one
   records the feature as used, or reports an error when it is not
   available.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  /* Record that the 32-D-register extension was actually used,
	     in the per-mode feature set.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
      /* S registers: high 4 bits in the main field, LSB separate.  */
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

      /* D registers: low 4 bits in the main field, MSB separate.  */
    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
8315
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix: here only a BFD_RELOC_ARM_SHIFT_IMM
   reloc is recorded for it.  Warns on UNPREDICTABLE uses of r15.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* The shift-amount register itself must not be PC either.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded as ROR with a zero shift amount.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
8353
/* Encode operand I (a register with optional shift, or an immediate)
   as the shifter operand of an ARM data-processing instruction.  The
   immediate value itself is applied by the relocation machinery when
   relocs[0] carries BFD_RELOC_ARM_IMMEDIATE.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
8369
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and write-back
   bits shared by both addressing modes.  IS_T selects the unprivileged
   (LDRT/STRT-style) forms, which only allow post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* T-form instructions imply write-back without it being written
	 in the source.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits 19:16) is updated and is the
     same as the transfer register (bits 15:12).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8412
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8472
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8516
8517 /* Write immediate bits [7:0] to the following locations:
8518
8519 |28/24|23 19|18 16|15 4|3 0|
8520 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8521
8522 This function is used by VMOV/VMVN/VORR/VBIC. */
8523
8524 static void
8525 neon_write_immbits (unsigned immbits)
8526 {
8527 inst.instruction |= immbits & 0xf;
8528 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8529 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8530 }
8531
8532 /* Invert low-order SIZE bits of XHI:XLO. */
8533
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  /* Invert only the low SIZE bits; the high word is touched only for
     the 64-bit case.  */
  if (size == 8)
    lo = ~lo & 0xff;
  else if (size == 16)
    lo = ~lo & 0xffff;
  else if (size == 32 || size == 64)
    {
      if (size == 64)
	hi = ~hi & 0xffffffff;
      lo = ~lo & 0xffffffff;
    }
  else
    abort ();

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8568
8569 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
8570 A, B, C, D. */
8571
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Each of the four bytes must be either all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8580
8581 /* For immediate of above form, return 0bABCD. */
8582
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  /* Collect bit 0 of each byte into the four low result bits.  */
  for (byte = 0; byte < 4; byte++)
    if (imm & (1u << (byte * 8)))
      result |= 1u << byte;

  return result;
}
8589
8590 /* Compress quarter-float representation to 0b...000 abcdefgh. */
8591
static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;  /* exponent LSBs + mantissa top.  */
  unsigned sign = (imm >> 24) & 0x80;  /* sign bit moved down to bit 7.  */

  return sign | low7;
}
8597
8598 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8599 the instruction. *OP is passed as the initial value of the op field, and
8600 may be set to a different value depending on the constant (i.e.
8601 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8602 MVN). If the immediate looks like a repeated pattern then also
8603 try smaller element sizes. */
8604
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* A quarter-precision float immediate uses cmode 0xF; only valid for
     32-bit elements and never for the MVN form (*op == 1).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xE with op forced to 1: every byte of the 64-bit value is
	 either 0x00 or 0xFF, squashed to one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit pattern is only encodable when both halves
	 match; fall through to the 32-bit cases if so.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmodes 0x0/0x2/0x4/0x6: a single byte in one of the four byte
	 positions of each 32-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* cmodes 0xC/0xD: one byte with all lower bytes set to ones.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry at 16-bit granularity when both halfwords agree.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmodes 0x8/0xA: a single byte in either half of a 16-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry at 8-bit granularity when both bytes agree.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8707
8708 #if defined BFD_HOST_64_BIT
8709 /* Returns TRUE if double precision value V may be cast
8710 to single precision without loss of accuracy. */
8711
8712 static bfd_boolean
8713 is_double_a_single (bfd_int64_t v)
8714 {
8715 int exp = (int)((v >> 52) & 0x7FF);
8716 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8717
8718 return (exp == 0 || exp == 0x7FF
8719 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8720 && (mantissa & 0x1FFFFFFFl) == 0;
8721 }
8722
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* All-ones exponent: Inf/NaN maps to the all-ones single exponent.  */
  if (exp == 0x7FF)
    exp = 0xFF;
  else
    {
      /* Rebias from the 11-bit to the 8-bit exponent encoding.  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  /* NOTE(review): 0x7F is not the single-precision infinity
	     exponent (that would be 0xFF).  This branch appears
	     unreachable for inputs accepted by is_double_a_single,
	     which bounds exp to 1023 + 127 — confirm before relying
	     on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep only the top 23 of the 52 mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
8754 #endif /* BFD_HOST_64_BIT */
8755
/* Classification of an "=constant" literal load: a Thumb LDR, an ARM
   LDR, or a vector (VFP/Neon VLDR) load.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};

static void do_vfp_nsyn_opcode (const char *);
8764
8765 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8766 Determine whether it can be performed with a move instruction; if
8767 it can, convert inst.instruction to that move instruction and
8768 return TRUE; if it can't, convert inst.instruction to a literal-pool
8769 load and return FALSE. If this is not a valid thing to do in the
8770 current context, set inst.error and return TRUE.
8771
8772 inst.operands[i] describes the destination register. */
8773
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The =expr form is only valid on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      /* Materialise the constant's value, assembling bignums from
	 their littlenum words.  */
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      /* First try to replace the load with a single move-immediate
	 instruction, avoiding a literal-pool entry entirely.  */
      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /* In case this replacement is being done on Armv8-M
			 Baseline we need to make sure to disable the
			 instruction size check, as otherwise GAS will reject
			 the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN immediate; if the pattern is not
		 directly encodable, retry with the inverted value and
		 the opposite op bit.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.relocs[0].exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov  Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move form matched: place the constant in the literal pool and
     rewrite the operand as a PC-relative load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
9014
9015 /* inst.operands[i] was set up by parse_address. Encode it into an
9016 ARM-format instruction. Reject all forms which cannot be encoded
9017 into a coprocessor load/store instruction. If wb_ok is false,
9018 reject use of writeback; if unind_ok is false, reject use of
9019 unindexed addressing. If reloc_override is not 0, use it instead
9020 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9021 (in which case it is preserved). */
9022
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand means an =constant pseudo-load; only valid
     when the destination is a vector register.  */
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the offset relocation, preserving an existing group
     relocation (ALU_PC_G0_NC .. LDC_SB_G2) or LDR_PC_G0.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9091
9092 /* Functions for instruction encoding, sorted by sub-architecture.
9093 First some generics; their names are taken from the conventional
9094 bit positions for register arguments in ARM format instructions. */
9095
static void
do_noargs (void)
{
  /* Instructions with no operands: the opcode from insns[] is already
     complete, so there is nothing to encode.  */
}
9100
9101 static void
9102 do_rd (void)
9103 {
9104 inst.instruction |= inst.operands[0].reg << 12;
9105 }
9106
9107 static void
9108 do_rn (void)
9109 {
9110 inst.instruction |= inst.operands[0].reg << 16;
9111 }
9112
9113 static void
9114 do_rd_rm (void)
9115 {
9116 inst.instruction |= inst.operands[0].reg << 12;
9117 inst.instruction |= inst.operands[1].reg;
9118 }
9119
9120 static void
9121 do_rm_rn (void)
9122 {
9123 inst.instruction |= inst.operands[0].reg;
9124 inst.instruction |= inst.operands[1].reg << 16;
9125 }
9126
9127 static void
9128 do_rd_rn (void)
9129 {
9130 inst.instruction |= inst.operands[0].reg << 12;
9131 inst.instruction |= inst.operands[1].reg << 16;
9132 }
9133
9134 static void
9135 do_rn_rd (void)
9136 {
9137 inst.instruction |= inst.operands[0].reg << 16;
9138 inst.instruction |= inst.operands[1].reg << 12;
9139 }
9140
9141 static void
9142 do_tt (void)
9143 {
9144 inst.instruction |= inst.operands[0].reg << 8;
9145 inst.instruction |= inst.operands[1].reg << 16;
9146 }
9147
9148 static bfd_boolean
9149 check_obsolete (const arm_feature_set *feature, const char *msg)
9150 {
9151 if (ARM_CPU_IS_ANY (cpu_variant))
9152 {
9153 as_tsktsk ("%s", msg);
9154 return TRUE;
9155 }
9156 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9157 {
9158 as_bad ("%s", msg);
9159 return TRUE;
9160 }
9161
9162 return FALSE;
9163 }
9164
9165 static void
9166 do_rd_rm_rn (void)
9167 {
9168 unsigned Rn = inst.operands[2].reg;
9169 /* Enforce restrictions on SWP instruction. */
9170 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
9171 {
9172 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
9173 _("Rn must not overlap other operands"));
9174
9175 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
9176 */
9177 if (!check_obsolete (&arm_ext_v8,
9178 _("swp{b} use is obsoleted for ARMv8 and later"))
9179 && warn_on_deprecated
9180 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
9181 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
9182 }
9183
9184 inst.instruction |= inst.operands[0].reg << 12;
9185 inst.instruction |= inst.operands[1].reg;
9186 inst.instruction |= Rn << 16;
9187 }
9188
9189 static void
9190 do_rd_rn_rm (void)
9191 {
9192 inst.instruction |= inst.operands[0].reg << 12;
9193 inst.instruction |= inst.operands[1].reg << 16;
9194 inst.instruction |= inst.operands[2].reg;
9195 }
9196
9197 static void
9198 do_rm_rd_rn (void)
9199 {
9200 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
9201 constraint (((inst.relocs[0].exp.X_op != O_constant
9202 && inst.relocs[0].exp.X_op != O_illegal)
9203 || inst.relocs[0].exp.X_add_number != 0),
9204 BAD_ADDR_MODE);
9205 inst.instruction |= inst.operands[0].reg;
9206 inst.instruction |= inst.operands[1].reg << 12;
9207 inst.instruction |= inst.operands[2].reg << 16;
9208 }
9209
9210 static void
9211 do_imm0 (void)
9212 {
9213 inst.instruction |= inst.operands[0].imm;
9214 }
9215
9216 static void
9217 do_rd_cpaddr (void)
9218 {
9219 inst.instruction |= inst.operands[0].reg << 12;
9220 encode_arm_cp_address (1, TRUE, TRUE, 0);
9221 }
9222
9223 /* ARM instructions, in alphabetical order by function name (except
9224 that wrapper functions appear immediately after the function they
9225 wrap). */
9226
9227 /* This is a pseudo-op of the form "adr rd, label" to be converted
9228 into a relative address of the form "add rd, pc, #label-.-8". */
9229
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* In ARM state the PC reads as the instruction address plus 8;
     compensate in the addend.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking: an ADR of a defined Thumb function must produce an
     address with the Thumb bit set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9248
9249 /* This is a pseudo-op of the form "adrl rd, label" to be converted
9250 into a relative address of the form:
9251 add rd, pc, #low(label-.-8)"
9252 add rd, rd, #high(label-.-8)" */
9253
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel	       = 1;
  /* ADRL expands to two instructions (low part / high part).  */
  inst.size		       = INSN_SIZE * 2;
  /* PC bias: the PC reads as the instruction address plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking: an ADRL of a defined Thumb function must produce an
     address with the Thumb bit set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9273
9274 static void
9275 do_arit (void)
9276 {
9277 constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9278 && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9279 THUMB1_RELOC_ONLY);
9280 if (!inst.operands[1].present)
9281 inst.operands[1].reg = inst.operands[0].reg;
9282 inst.instruction |= inst.operands[0].reg << 12;
9283 inst.instruction |= inst.operands[1].reg << 16;
9284 encode_arm_shifter_operand (2);
9285 }
9286
9287 static void
9288 do_barrier (void)
9289 {
9290 if (inst.operands[0].present)
9291 inst.instruction |= inst.operands[0].imm;
9292 else
9293 inst.instruction |= 0xf;
9294 }
9295
9296 static void
9297 do_bfc (void)
9298 {
9299 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9300 constraint (msb > 32, _("bit-field extends past end of register"));
9301 /* The instruction encoding stores the LSB and MSB,
9302 not the LSB and width. */
9303 inst.instruction |= inst.operands[0].reg << 12;
9304 inst.instruction |= inst.operands[1].imm << 7;
9305 inst.instruction |= (msb - 1) << 16;
9306 }
9307
9308 static void
9309 do_bfi (void)
9310 {
9311 unsigned int msb;
9312
9313 /* #0 in second position is alternative syntax for bfc, which is
9314 the same instruction but with REG_PC in the Rm field. */
9315 if (!inst.operands[1].isreg)
9316 inst.operands[1].reg = REG_PC;
9317
9318 msb = inst.operands[2].imm + inst.operands[3].imm;
9319 constraint (msb > 32, _("bit-field extends past end of register"));
9320 /* The instruction encoding stores the LSB and MSB,
9321 not the LSB and width. */
9322 inst.instruction |= inst.operands[0].reg << 12;
9323 inst.instruction |= inst.operands[1].reg;
9324 inst.instruction |= inst.operands[2].imm << 7;
9325 inst.instruction |= (msb - 1) << 16;
9326 }
9327
9328 static void
9329 do_bfx (void)
9330 {
9331 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9332 _("bit-field extends past end of register"));
9333 inst.instruction |= inst.operands[0].reg << 12;
9334 inst.instruction |= inst.operands[1].reg;
9335 inst.instruction |= inst.operands[2].imm << 7;
9336 inst.instruction |= (inst.operands[3].imm - 1) << 16;
9337 }
9338
9339 /* ARM V5 breakpoint instruction (argument parse)
9340 BKPT <16 bit unsigned immediate>
9341 Instruction is not conditional.
9342 The bit pattern given in insns[] has the COND_ALWAYS condition,
9343 and it is an error if the caller tried to override that. */
9344
9345 static void
9346 do_bkpt (void)
9347 {
9348 /* Top 12 of 16 bits to bits 19:8. */
9349 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9350
9351 /* Bottom 4 of 16 bits to bits 3:0. */
9352 inst.instruction |= inst.operands[0].imm & 0xf;
9353 }
9354
static void
encode_branch (int default_reloc)
{
  /* Select the relocation for a branch target.  A '(plt)' or '(tlscall)'
     suffix parsed into operand 0 overrides DEFAULT_RELOC; any other
     suffix is rejected.  */
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9371
static void
do_branch (void)
{
  /* Plain B: on EABI v4+ objects use the JUMP relocation (which the
     linker may veneer for interworking); otherwise the generic
     PC-relative branch relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9382
static void
do_bl (void)
{
  /* BL: on EABI v4+ an unconditional BL gets the CALL relocation (the
     linker may convert it to BLX); a conditional BL cannot be converted,
     so it takes the JUMP relocation instead.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9398
9399 /* ARM V5 branch-link-exchange instruction (argument parse)
9400 BLX <target_addr> ie BLX(1)
9401 BLX{<condition>} <Rm> ie BLX(2)
9402 Unfortunately, there are two different opcodes for this mnemonic.
9403 So, the insns[].value is not used, and the code here zaps values
9404 into inst.instruction.
9405 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9406
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Immediate BLX uses the unconditional (0xf) opcode space.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9430
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  /* An explicit .object_arch directive can also force the relocation.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
      want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9455
9456
9457 /* ARM v5TEJ. Jump to Jazelle code. */
9458
9459 static void
9460 do_bxj (void)
9461 {
9462 if (inst.operands[0].reg == REG_PC)
9463 as_tsktsk (_("use of r15 in bxj is not really useful"));
9464
9465 inst.instruction |= inst.operands[0].reg;
9466 }
9467
9468 /* Co-processor data operation:
9469 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9470 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9471 static void
9472 do_cdp (void)
9473 {
9474 inst.instruction |= inst.operands[0].reg << 8;
9475 inst.instruction |= inst.operands[1].imm << 20;
9476 inst.instruction |= inst.operands[2].reg << 12;
9477 inst.instruction |= inst.operands[3].reg << 16;
9478 inst.instruction |= inst.operands[4].reg;
9479 inst.instruction |= inst.operands[5].imm << 5;
9480 }
9481
9482 static void
9483 do_cmp (void)
9484 {
9485 inst.instruction |= inst.operands[0].reg << 16;
9486 encode_arm_shifter_operand (1);
9487 }
9488
9489 /* Transfer between coprocessor and ARM registers.
9490 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9491 MRC2
9492 MCR{cond}
9493 MCR2
9494
9495 No special properties. */
9496
/* Describes one coprocessor register access (identified by coprocessor
   number, opc1, CRn, CRm, opc2) together with the architecture feature
   sets in which that access is deprecated or obsoleted, and the
   diagnostics to issue in each case.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* First opcode field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Second opcode field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsolescence diagnostic.  */
};
9509
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  These are the CP15
   barrier-emulation registers and the ThumbEE registers, all of which
   are deprecated from ARMv8 onwards.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9537
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  /* Operand 2 is the core register Rt ("Rd" here by ARM-format field
     name).  */
  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the register being accessed matches an entry in the
     deprecated-access table.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* Assemble coproc, opc1, Rt, CRn, CRm and opc2 fields.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9587
9588 /* Transfer between coprocessor register and pair of ARM registers.
9589 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9590 MCRR2
9591 MRRC{cond}
9592 MRRC2
9593
9594 Two XScale instructions are special cases of these:
9595
9596 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9597 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9598
9599 Result unpredictable if Rd or Rn is R15. */
9600
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Assemble coproc, opcode, the two core registers and CRm.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9634
9635 static void
9636 do_cpsi (void)
9637 {
9638 inst.instruction |= inst.operands[0].imm << 6;
9639 if (inst.operands[1].present)
9640 {
9641 inst.instruction |= CPSI_MMOD;
9642 inst.instruction |= inst.operands[1].imm;
9643 }
9644 }
9645
9646 static void
9647 do_dbg (void)
9648 {
9649 inst.instruction |= inst.operands[0].imm;
9650 }
9651
9652 static void
9653 do_div (void)
9654 {
9655 unsigned Rd, Rn, Rm;
9656
9657 Rd = inst.operands[0].reg;
9658 Rn = (inst.operands[1].present
9659 ? inst.operands[1].reg : Rd);
9660 Rm = inst.operands[2].reg;
9661
9662 constraint ((Rd == REG_PC), BAD_PC);
9663 constraint ((Rn == REG_PC), BAD_PC);
9664 constraint ((Rm == REG_PC), BAD_PC);
9665
9666 inst.instruction |= Rd << 16;
9667 inst.instruction |= Rn << 0;
9668 inst.instruction |= Rm << 8;
9669 }
9670
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes.  */
  inst.size = 0;
  if (unified_syntax)
    {
      /* Record the IT block state so following instructions can be
	 checked against it.  The low nibble of the opcode is the mask;
	 0x10 marks the base condition bit.  */
      set_pred_insn_type (IT_INSN);
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9687
9688 /* If there is only one register in the register list,
9689 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* For an empty list (range == 0) ffs returns 0 and i is -1; the old
     code then evaluated 1 << -1, which is undefined behaviour.  Guard
     the shift: only bit positions 0-15 can name a core register.  */
  if (i < 0 || i > 15)
    return -1;

  return range == (1 << i) ? i : -1;
}
9696
/* Encode an LDM/STM-class instruction: base register, register list,
   writeback, and the user-mode/alternative-banked forms.  When
   FROM_PUSH_POP_MNEM is set and the list names a single register, switch
   to the single-register (A2) PUSH/POP encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the LDM(2)/LDM(3)/STM(2)
     forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field, then rebuild as STR/LDR
	 pre/post-indexed on SP.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9752
static void
do_ldmstm (void)
{
  /* Plain LDM/STM mnemonic: never eligible for the single-register
     PUSH/POP (A2) encoding.  */
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9758
9759 /* ARMv5TE load-consecutive (argument parse)
9760 Mode is like LDRH.
9761
9762 LDRccD R, mode
9763 STRccD R, mode. */
9764
static void
do_ldrd (void)
{
  /* LDRD/STRD transfer an even/odd register pair; the second register is
     optional in the source and defaults to Rt+1.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 is excluded because the pair would then include r15 (PC).  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9800
static void
do_ldrex (void)
{
  /* LDREX only supports a plain [Rn] (or [Rn, #0]) addressing form: no
     index, shift, writeback or negative offset.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The zero offset has been verified; drop the pending relocation.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9832
static void
do_ldrexd (void)
{
  /* LDREXD loads an even/odd register pair; the second register is
     optional and defaults to Rt+1.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* Operand 2 is the base-address register.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9848
9849 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9850 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Reject "ldr pc, [pc, #imm]" when the immediate is not a multiple of
     four; the register-offset form is exempt.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9860
static void
do_ldst (void)
{
  /* Word/byte LDR/STR.  A literal operand is converted into a PC-relative
     literal-pool load (or a MOV when the value permits).  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9871
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9890
9891 /* Halfword and signed-byte load/store operations. */
9892
static void
do_ldstv4 (void)
{
  /* Halfword/signed-byte LDR/STR (addressing mode 3).  A literal operand
     becomes a literal-pool load.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9903
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9922
9923 /* Co-processor register load/store.
9924 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9925 static void
9926 do_lstc (void)
9927 {
9928 inst.instruction |= inst.operands[0].reg << 8;
9929 inst.instruction |= inst.operands[1].reg << 12;
9930 encode_arm_cp_address (2, TRUE, TRUE, 0);
9931 }
9932
9933 static void
9934 do_mlas (void)
9935 {
9936 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9937 if (inst.operands[0].reg == inst.operands[1].reg
9938 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9939 && !(inst.instruction & 0x00400000))
9940 as_tsktsk (_("Rd and Rm should be different in mla"));
9941
9942 inst.instruction |= inst.operands[0].reg << 16;
9943 inst.instruction |= inst.operands[1].reg;
9944 inst.instruction |= inst.operands[2].reg << 8;
9945 inst.instruction |= inst.operands[3].reg << 12;
9946 }
9947
static void
do_mov (void)
{
  /* MOV Rd, <shifter_operand>.  Group relocations flagged as Thumb-1-only
     cannot be used on the ARM encoding.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9957
9958 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT (top) from MOVW.  A :lower16:/:upper16:
     prefix must match the chosen instruction.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9979
static int
do_vfp_nsyn_mrs (void)
{
  /* Handle the VFP forms of MRS.  An APSR-style destination (isvec) with
     FPSCR as the source becomes FMSTAT; a VFP system-register source
     becomes FMRX.  Returns FAIL if neither applies so the caller can fall
     back to the core MRS encoding.  */
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9998
9999 static int
10000 do_vfp_nsyn_msr (void)
10001 {
10002 if (inst.operands[0].isvec)
10003 do_vfp_nsyn_opcode ("fmxr");
10004 else
10005 return FAIL;
10006
10007 return SUCCESS;
10008 }
10009
10010 static void
10011 do_vmrs (void)
10012 {
10013 unsigned Rt = inst.operands[0].reg;
10014
10015 if (thumb_mode && Rt == REG_SP)
10016 {
10017 inst.error = BAD_SP;
10018 return;
10019 }
10020
10021 switch (inst.operands[1].reg)
10022 {
10023 /* MVFR2 is only valid for Armv8-A. */
10024 case 5:
10025 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
10026 _(BAD_FPU));
10027 break;
10028
10029 /* Check for new Armv8.1-M Mainline changes to <spec_reg>. */
10030 case 1: /* fpscr. */
10031 constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10032 || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10033 _(BAD_FPU));
10034 break;
10035
10036 case 14: /* fpcxt_ns. */
10037 case 15: /* fpcxt_s. */
10038 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
10039 _("selected processor does not support instruction"));
10040 break;
10041
10042 case 2: /* fpscr_nzcvqc. */
10043 case 12: /* vpr. */
10044 case 13: /* p0. */
10045 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
10046 || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10047 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10048 _("selected processor does not support instruction"));
10049 if (inst.operands[0].reg != 2
10050 && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
10051 as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10052 break;
10053
10054 default:
10055 break;
10056 }
10057
10058 /* APSR_ sets isvec. All other refs to PC are illegal. */
10059 if (!inst.operands[0].isvec && Rt == REG_PC)
10060 {
10061 inst.error = BAD_PC;
10062 return;
10063 }
10064
10065 /* If we get through parsing the register name, we just insert the number
10066 generated into the instruction without further validation. */
10067 inst.instruction |= (inst.operands[1].reg << 16);
10068 inst.instruction |= (Rt << 12);
10069 }
10070
/* Encode VMSR <spec_reg>, <Rt>: operand 0 is the floating-point/MVE
   special register, operand 1 the core register Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpcr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case 2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* Only fpscr_nzcvqc (2) of this group is accessible without MVE;
	 operand 0 is the special register here.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10126
/* Encode MRS: read CPSR/SPSR (or a banked register) into Rd.
   Falls through to the VFP VMRS handler for its pseudo-forms.  */

static void
do_mrs (void)
{
  unsigned br;

  /* VMRS pseudo-syntax (e.g. fpscr forms) takes precedence.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked register form: the parsed value must either have the
	 banked-register marker bit (0x200) set or select the 0xf0000
	 group.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10155
10156 /* Two possible forms:
10157 "{C|S}PSR_<field>, Rm",
10158 "{C|S}PSR_f, #expression". */
10159
10160 static void
10161 do_msr (void)
10162 {
10163 if (do_vfp_nsyn_msr () == SUCCESS)
10164 return;
10165
10166 inst.instruction |= inst.operands[0].imm;
10167 if (inst.operands[1].isreg)
10168 inst.instruction |= inst.operands[1].reg;
10169 else
10170 {
10171 inst.instruction |= INST_IMMEDIATE;
10172 inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
10173 inst.relocs[0].pc_rel = 0;
10174 }
10175 }
10176
/* Encode MUL Rd, Rm, Rs (Rs optional, defaulting to Rd).  */

static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* MUL Rd, Rm is shorthand for MUL Rd, Rm, Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm was unpredictable before ARMv6; warn softly.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
10192
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10217
/* Encode NOP / NOP #imm.  On v6K and later (or when a hint operand is
   given) emit the architectural hint form; otherwise leave the
   opcode's default encoding untouched.  */

static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10231
10232 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10233 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10234 Condition defaults to COND_ALWAYS.
10235 Error if Rd, Rn or Rm are R15. */
10236
10237 static void
10238 do_pkhbt (void)
10239 {
10240 inst.instruction |= inst.operands[0].reg << 12;
10241 inst.instruction |= inst.operands[1].reg << 16;
10242 inst.instruction |= inst.operands[2].reg;
10243 if (inst.operands[3].present)
10244 encode_arm_shift (3);
10245 }
10246
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note the Rn/Rm operands swap roles.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10269
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Constraints are checked in order; the first failure supplies
     the diagnostic.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
10290
/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI uses the offset (P=0) form of the address encoding.  */
  inst.instruction &= ~PRE_INDEX;
}
10306
/* Encode PUSH/POP {reglist} by rewriting the operands into the
   equivalent STMDB/LDMIA SP!, {reglist} form and delegating to the
   LDM/STM encoder.  */

static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesise a
     writeback SP base register as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
10319
10320 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10321 word at the specified address and the following word
10322 respectively.
10323 Unconditionally executed.
10324 Error if Rn is R15. */
10325
10326 static void
10327 do_rfe (void)
10328 {
10329 inst.instruction |= inst.operands[0].reg << 16;
10330 if (inst.operands[0].writeback)
10331 inst.instruction |= WRITE_BACK;
10332 }
10333
10334 /* ARM V6 ssat (argument parse). */
10335
10336 static void
10337 do_ssat (void)
10338 {
10339 inst.instruction |= inst.operands[0].reg << 12;
10340 inst.instruction |= (inst.operands[1].imm - 1) << 16;
10341 inst.instruction |= inst.operands[2].reg;
10342
10343 if (inst.operands[3].present)
10344 encode_arm_shift (3);
10345 }
10346
10347 /* ARM V6 usat (argument parse). */
10348
10349 static void
10350 do_usat (void)
10351 {
10352 inst.instruction |= inst.operands[0].reg << 12;
10353 inst.instruction |= inst.operands[1].imm << 16;
10354 inst.instruction |= inst.operands[2].reg;
10355
10356 if (inst.operands[3].present)
10357 encode_arm_shift (3);
10358 }
10359
10360 /* ARM V6 ssat16 (argument parse). */
10361
10362 static void
10363 do_ssat16 (void)
10364 {
10365 inst.instruction |= inst.operands[0].reg << 12;
10366 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
10367 inst.instruction |= inst.operands[2].reg;
10368 }
10369
10370 static void
10371 do_usat16 (void)
10372 {
10373 inst.instruction |= inst.operands[0].reg << 12;
10374 inst.instruction |= inst.operands[1].imm << 16;
10375 inst.instruction |= inst.operands[2].reg;
10376 }
10377
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Operand imm is nonzero for BE; bit 9 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10394
/* Encode a shift pseudo-instruction (e.g. LSL/LSR/ASR/ROR forms):
   "Rd, {Rm,} Rs" (register shift) or "Rd, {Rm,} #imm" (immediate,
   resolved via a shift-immediate relocation).  Rm defaults to Rd
   when omitted.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10415
/* Encode SMC #imm4 (Secure Monitor Call); the 4-bit immediate is
   fixed up via the SMC relocation.  */

static void
do_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
10425
/* Encode HVC #imm (Hypervisor Call); the immediate is fixed up via
   the HVC relocation.  */

static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
10432
/* Encode SWI/SVC #imm; the immediate is fixed up via the SWI
   relocation.  */

static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
10439
/* Encode ARM SETPAN #imm1 — bit 9 carries the PAN state.  Requires
   the PAN extension.  */

static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
10448
/* Thumb encoding of SETPAN #imm1 — the PAN state sits at bit 3.  */

static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10457
10458 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10459 SMLAxy{cond} Rd,Rm,Rs,Rn
10460 SMLAWy{cond} Rd,Rm,Rs,Rn
10461 Error if any register is R15. */
10462
10463 static void
10464 do_smla (void)
10465 {
10466 inst.instruction |= inst.operands[0].reg << 16;
10467 inst.instruction |= inst.operands[1].reg;
10468 inst.instruction |= inst.operands[2].reg << 8;
10469 inst.instruction |= inst.operands[3].reg << 12;
10470 }
10471
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* Identical accumulator halves give an unpredictable result.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10488
10489 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10490 SMULxy{cond} Rd,Rm,Rs
10491 Error if any register is R15. */
10492
10493 static void
10494 do_smul (void)
10495 {
10496 inst.instruction |= inst.operands[0].reg << 16;
10497 inst.instruction |= inst.operands[1].reg;
10498 inst.instruction |= inst.operands[2].reg << 8;
10499 }
10500
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and must be SP when given.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode number.  */
  /* Writeback may be flagged on either the base or the mode operand.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10522
/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  /* Only a plain [Rn] (no offset, no writeback, no index) address
     is acceptable.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10548
/* Thumb STREXB/STREXH: validate the plain [Rn] address and register
   overlaps, then delegate to the generic Rm/Rd/Rn encoder.  */

static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10563
/* ARM STREXD Rd, Rt, Rt2, [Rn]: Rt must be an even register below
   r14 and Rt2 (when written explicitly) must be Rt+1.  */

static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap the register pair or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10585
/* ARM V8 STRL.  Rejects status-register overlap with the source or
   base register, then uses the generic Rd/Rm/Rn encoder.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10595
/* Thumb variant of STLEX: same overlap check, but the Thumb operand
   order encoder (Rm/Rd/Rn) is used.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10604
10605 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10606 extends it to 32-bits, and adds the result to a value in another
10607 register. You can specify a rotation by 0, 8, 16, or 24 bits
10608 before extracting the 16-bit value.
10609 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10610 Condition defaults to COND_ALWAYS.
10611 Error if any register uses R15. */
10612
10613 static void
10614 do_sxtah (void)
10615 {
10616 inst.instruction |= inst.operands[0].reg << 12;
10617 inst.instruction |= inst.operands[1].reg << 16;
10618 inst.instruction |= inst.operands[2].reg;
10619 inst.instruction |= inst.operands[3].imm << 10;
10620 }
10621
10622 /* ARM V6 SXTH.
10623
10624 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10625 Condition defaults to COND_ALWAYS.
10626 Error if any register uses R15. */
10627
10628 static void
10629 do_sxth (void)
10630 {
10631 inst.instruction |= inst.operands[0].reg << 12;
10632 inst.instruction |= inst.operands[1].reg;
10633 inst.instruction |= inst.operands[2].imm << 10;
10634 }
10635 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision one-operand form: Sd, Sm.  Requires VFPv1xD
   or MVE.  */
static void
do_vfp_sp_monadic (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10649
10650 static void
10651 do_vfp_sp_dyadic (void)
10652 {
10653 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10654 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
10655 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
10656 }
10657
/* Single-precision compare-with-zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10663
/* Conversion with double-precision destination and single-precision
   source: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10670
/* Conversion with single-precision destination and double-precision
   source: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10677
/* VMOV Rt, Sn: move a single-precision register to a core register.
   Requires VFPv1xD or MVE.  */
static void
do_vfp_reg_from_sp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
10688
/* VMOV Rt, Rt2, Sm, Sm1: move a pair of consecutive single-precision
   registers to two core registers.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10698
/* VMOV Sn, Rt: move a core register into a single-precision register.
   Requires VFPv1xD or MVE.  */
static void
do_vfp_sp_from_reg (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10709
/* VMOV Sm, Sm1, Rt, Rt2: move two core registers into a pair of
   consecutive single-precision registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10719
/* Single-precision VLDR/VSTR: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10726
/* Double-precision VLDR/VSTR: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10733
10734
/* Common encoder for single-precision load/store multiple:
   base register, first register Sd, and the register count.
   Non-IA addressing modes require base writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* Register count.  */
}
10747
/* Common encoder for double-precision load/store multiple.  The
   transfer count is in words (two per D register); the X variants
   add one extra word.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Two words per double register ...  */
  count = inst.operands[1].imm << 1;
  /* ... plus one trailing word for the FLDMX/FSTMX forms.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10768
/* Increment-after single-precision load/store multiple.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10774
/* Decrement-before single-precision load/store multiple.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10780
/* Increment-after double-precision load/store multiple.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10786
/* Decrement-before double-precision load/store multiple.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10792
/* Increment-after "X" form (extra trailing word) load/store multiple.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10798
/* Decrement-before "X" form (extra trailing word) load/store multiple.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10804
/* Double-precision two-operand form: Dd, Dm.  Requires VFPv1 or MVE.  */
static void
do_vfp_dp_rd_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10815
/* Double-precision form with operand 0 in the Dn field and operand 1
   in the Dd field.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10822
/* Double-precision form: Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10829
/* Double-precision three-operand form: Dd, Dn, Dm.  Requires VFPv2
   or MVE.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10841
/* Double-precision single-operand form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10847
/* Double-precision form with operands in Dm, Dd, Dn order.  Requires
   VFPv2 or MVE.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10859
10860 /* VFPv3 instructions. */
10861 static void
10862 do_vfp_sp_const (void)
10863 {
10864 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10865 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10866 inst.instruction |= (inst.operands[1].imm & 0x0f);
10867 }
10868
10869 static void
10870 do_vfp_dp_const (void)
10871 {
10872 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10873 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10874 inst.instruction |= (inst.operands[1].imm & 0x0f);
10875 }
10876
/* Encode the fraction-bits immediate of a VFPv3 fixed-point
   conversion.  SRCSIZE is the fixed-point operand width (16 or 32);
   the encoded value is srcsize minus the parsed fbits, stored with
   its low bit at position 5 and the remaining bits at 0-4.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10900
/* Single-precision fixed-point conversion, 16-bit operand width.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10907
/* Double-precision fixed-point conversion, 16-bit operand width.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10914
/* Single-precision fixed-point conversion, 32-bit operand width.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10921
/* Double-precision fixed-point conversion, 32-bit operand width.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10928 \f
10929 /* FPA instructions. Also in a logical order. */
10930
10931 static void
10932 do_fpa_cmp (void)
10933 {
10934 inst.instruction |= inst.operands[0].reg << 16;
10935 inst.instruction |= inst.operands[1].reg;
10936 }
10937
/* FPA LFM/SFM: load/store multiple floating-point registers.
   Operand 0 is the first register, operand 1 the register count
   (1-4), operand 2 the address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The count is split across the CP_T_X/CP_T_Y bits; a count of
     four is encoded as both bits clear.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	         break;
    case 2: inst.instruction |= CP_T_Y;	         break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 				         break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfers 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10976 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC: the destination must be written as r15 (the flags).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10984
10985 static void
10986 do_iwmmxt_textrc (void)
10987 {
10988 inst.instruction |= inst.operands[0].reg << 12;
10989 inst.instruction |= inst.operands[1].imm;
10990 }
10991
10992 static void
10993 do_iwmmxt_textrm (void)
10994 {
10995 inst.instruction |= inst.operands[0].reg << 12;
10996 inst.instruction |= inst.operands[1].reg << 16;
10997 inst.instruction |= inst.operands[2].imm;
10998 }
10999
11000 static void
11001 do_iwmmxt_tinsr (void)
11002 {
11003 inst.instruction |= inst.operands[0].reg << 16;
11004 inst.instruction |= inst.operands[1].reg << 12;
11005 inst.instruction |= inst.operands[2].imm;
11006 }
11007
11008 static void
11009 do_iwmmxt_tmia (void)
11010 {
11011 inst.instruction |= inst.operands[0].reg << 5;
11012 inst.instruction |= inst.operands[1].reg;
11013 inst.instruction |= inst.operands[2].reg << 12;
11014 }
11015
11016 static void
11017 do_iwmmxt_waligni (void)
11018 {
11019 inst.instruction |= inst.operands[0].reg << 12;
11020 inst.instruction |= inst.operands[1].reg << 16;
11021 inst.instruction |= inst.operands[2].reg;
11022 inst.instruction |= inst.operands[3].imm << 20;
11023 }
11024
11025 static void
11026 do_iwmmxt_wmerge (void)
11027 {
11028 inst.instruction |= inst.operands[0].reg << 12;
11029 inst.instruction |= inst.operands[1].reg << 16;
11030 inst.instruction |= inst.operands[2].reg;
11031 inst.instruction |= inst.operands[3].imm << 21;
11032 }
11033
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  The source register
     is therefore placed in both the wRn and wRm fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
11042
11043 static void
11044 do_iwmmxt_wldstbh (void)
11045 {
11046 int reloc;
11047 inst.instruction |= inst.operands[0].reg << 12;
11048 if (thumb_mode)
11049 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
11050 else
11051 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
11052 encode_arm_cp_address (1, TRUE, FALSE, reloc);
11053 }
11054
/* WLDRW/WSTRW: word load-store of either a wR register or a control
   register.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers are unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
11068
/* WLDRD/WSTRD: doubleword load-store.  iWMMXt2 additionally allows a
   register-offset form, which is hand-assembled here rather than via
   the generic coprocessor address encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite the opcode for the iWMMXt2 register-offset form:
	 clear the coprocessor-form bits and force the 0xf
	 (unconditional) condition.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
11091
11092 static void
11093 do_iwmmxt_wshufh (void)
11094 {
11095 inst.instruction |= inst.operands[0].reg << 12;
11096 inst.instruction |= inst.operands[1].reg << 16;
11097 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
11098 inst.instruction |= (inst.operands[2].imm & 0x0f);
11099 }
11100
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg: the same register
     fills the wRd, wRn and wRm fields.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
11109
/* iWMMXt shift instructions that accept either a wR shift-count
   register or (iWMMXt2 only) a 5-bit immediate.  A zero immediate is
   rewritten into an equivalent full-width rotate (or a WOR for the
   doubleword forms), since the encoding cannot express a shift of
   the full element width directly.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation/size variant.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form: unconditional encoding, with the immediate
       split across bits 0-3 and bit 8.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11159 \f
11160 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
11161 operations first, then control, shift, and load/store. */
11162
11163 /* Insns like "foo X,Y,Z". */
11164
11165 static void
11166 do_mav_triple (void)
11167 {
11168 inst.instruction |= inst.operands[0].reg << 16;
11169 inst.instruction |= inst.operands[1].reg;
11170 inst.instruction |= inst.operands[2].reg << 12;
11171 }
11172
11173 /* Insns like "foo W,X,Y,Z".
11174 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
11175
11176 static void
11177 do_mav_quad (void)
11178 {
11179 inst.instruction |= inst.operands[0].reg << 5;
11180 inst.instruction |= inst.operands[1].reg << 12;
11181 inst.instruction |= inst.operands[2].reg << 16;
11182 inst.instruction |= inst.operands[3].reg;
11183 }
11184
11185 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
11186 static void
11187 do_mav_dspsc (void)
11188 {
11189 inst.instruction |= inst.operands[1].reg << 12;
11190 }
11191
11192 /* Maverick shift immediate instructions.
11193 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11194 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
11195
11196 static void
11197 do_mav_shift (void)
11198 {
11199 int imm = inst.operands[2].imm;
11200
11201 inst.instruction |= inst.operands[0].reg << 12;
11202 inst.instruction |= inst.operands[1].reg << 16;
11203
11204 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11205 Bits 5-7 of the insn should have bits 4-6 of the immediate.
11206 Bit 4 should be 0. */
11207 imm = (imm & 0xf) | ((imm & 0x70) << 1);
11208
11209 inst.instruction |= imm;
11210 }
11211 \f
11212 /* XScale instructions. Also sorted arithmetic before move. */
11213
11214 /* Xscale multiply-accumulate (argument parse)
11215 MIAcc acc0,Rm,Rs
11216 MIAPHcc acc0,Rm,Rs
11217 MIAxycc acc0,Rm,Rs. */
11218
11219 static void
11220 do_xsc_mia (void)
11221 {
11222 inst.instruction |= inst.operands[1].reg;
11223 inst.instruction |= inst.operands[2].reg << 12;
11224 }
11225
11226 /* Xscale move-accumulator-register (argument parse)
11227
11228 MARcc acc0,RdLo,RdHi. */
11229
11230 static void
11231 do_xsc_mar (void)
11232 {
11233 inst.instruction |= inst.operands[1].reg << 12;
11234 inst.instruction |= inst.operands[2].reg << 16;
11235 }
11236
11237 /* Xscale move-register-accumulator (argument parse)
11238
11239 MRAcc RdLo,RdHi,acc0. */
11240
11241 static void
11242 do_xsc_mra (void)
11243 {
11244 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11245 inst.instruction |= inst.operands[0].reg << 12;
11246 inst.instruction |= inst.operands[1].reg << 16;
11247 }
11248 \f
11249 /* Encoding functions relevant only to Thumb. */
11250
11251 /* inst.operands[i] is a shifted-register operand; encode
11252 it into inst.instruction in the format used by Thumb32. */
11253
11254 static void
11255 encode_thumb32_shifted_operand (int i)
11256 {
11257 unsigned int value = inst.relocs[0].exp.X_add_number;
11258 unsigned int shift = inst.operands[i].shift_kind;
11259
11260 constraint (inst.operands[i].immisreg,
11261 _("shift by register not allowed in thumb mode"));
11262 inst.instruction |= inst.operands[i].reg;
11263 if (shift == SHIFT_RRX)
11264 inst.instruction |= SHIFT_ROR << 4;
11265 else
11266 {
11267 constraint (inst.relocs[0].exp.X_op != O_constant,
11268 _("expression too complex"));
11269
11270 constraint (value > 32
11271 || (value == 32 && (shift == SHIFT_LSL
11272 || shift == SHIFT_ROR)),
11273 _("shift expression is too large"));
11274
11275 if (value == 0)
11276 shift = SHIFT_LSL;
11277 else if (value == 32)
11278 value = 0;
11279
11280 inst.instruction |= shift << 4;
11281 inst.instruction |= (value & 0x1c) << 10;
11282 inst.instruction |= (value & 0x03) << 6;
11283 }
11284 }
11285
11286
11287 /* inst.operands[i] was set up by parse_address. Encode it into a
11288 Thumb32 format load or store instruction. Reject forms that cannot
11289 be used with such instructions. If is_t is true, reject forms that
11290 cannot be used with a T instruction; if is_d is true, reject forms
11291 that cannot be used with a D instruction. If it is a store insn,
11292 reject PC in Rn. */
11293
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #n}] — register-offset form.  No writeback,
	 post-indexing, negative index or non-LSL shift in Thumb.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* LSL amount is limited to 0-3 and lives in bits 4-5.  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      /* The offset is fully encoded; no fixup needed.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #imm] or [Rn, #imm]! — pre-indexed immediate form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: P bit 24, W bit 21.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single form: P/U in bits 10-11, W in bit 8.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #imm — post-indexed form always writes back.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11365
11366 /* Table of Thumb instructions which exist in 16- and/or 32-bit
11367 encodings (the latter only in post-V6T2 cores). The index is the
11368 value used in the insns table below. When there is more than one
11369 possible 16-bit encoding for the instruction, this table always
11370 holds variant (1).
11371 Also contains several pseudo-instructions used during relaxation. */
/* Each X(mnemonic, n16, n32) entry gives the 16-bit and 32-bit opcode
   for the mnemonic; an n16 of 0000 or an n32 of ffffffff marks an
   encoding that does not exist in that width.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: generate the T_MNEM_* enumerators.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: table of 16-bit opcodes, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: table of 32-bit opcodes.  THUMB_SETS_FLAGS tests
   the S bit (bit 20) of the 32-bit encoding.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11492
11493 /* Thumb instruction encoders, in alphabetical order. */
11494
11495 /* ADDW or SUBW. */
11496
11497 static void
11498 do_t_add_sub_w (void)
11499 {
11500 int Rd, Rn;
11501
11502 Rd = inst.operands[0].reg;
11503 Rn = inst.operands[1].reg;
11504
11505 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11506 is the SP-{plus,minus}-immediate form of the instruction. */
11507 if (Rn == REG_SP)
11508 constraint (Rd == REG_PC, BAD_PC);
11509 else
11510 reject_bad_reg (Rd);
11511
11512 inst.instruction |= (Rn << 16) | (Rd << 8);
11513 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11514 }
11515
11516 /* Parse an add or subtract instruction. We get here with inst.instruction
11517 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11518
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* A narrow encoding is preferred when the flag-setting behaviour
	 matches the IT context: narrow adds/subs set flags only outside
	 an IT block, narrow add/sub only inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate form.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The ALU_ABS group relocs must stay as-is; anything
		     else either gets a Thumb ADD fixup (forced 16-bit)
		     or is left for relaxation to widen later.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) immediate encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     is permitted with a PC destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) form.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalise so the non-destination source ends
			 up in the Rm slot.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: narrow encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11739
/* Encode Thumb ADR: pick a relaxable 16-bit form, a 32-bit form, or a
   fixed 16-bit form depending on syntax mode and size requirement.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* If the target is a defined Thumb function, set the address's low
     bit so the resulting value is a valid interworking address.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11779
11780 /* Arithmetic instructions for which there is just one 16-bit
11781 instruction encoding, and it allows only two low registers.
11782 For maximal compatibility with ARM syntax, we allow three register
11783 operands even when Thumb-32 instructions are not available, as long
11784 as the first two are identical. For instance, both "sbc r0,r1" and
11785 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     Narrow 16-bit ALU ops set flags, so they are usable only
	     when the flag-setting behaviour matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11868
11869 /* Similarly, but for instructions where the arithmetic operation is
11870 commutative, so we can allow either of them to be different from
11871 the destination operand in a 16-bit instruction. For instance, all
11872 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11873 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so either source may
		 share the destination slot of the 16-bit encoding.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11969
11970 static void
11971 do_t_bfc (void)
11972 {
11973 unsigned Rd;
11974 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11975 constraint (msb > 32, _("bit-field extends past end of register"));
11976 /* The instruction encoding stores the LSB and MSB,
11977 not the LSB and width. */
11978 Rd = inst.operands[0].reg;
11979 reject_bad_reg (Rd);
11980 inst.instruction |= Rd << 8;
11981 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11982 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11983 inst.instruction |= msb - 1;
11984 }
11985
11986 static void
11987 do_t_bfi (void)
11988 {
11989 int Rd, Rn;
11990 unsigned int msb;
11991
11992 Rd = inst.operands[0].reg;
11993 reject_bad_reg (Rd);
11994
11995 /* #0 in second position is alternative syntax for bfc, which is
11996 the same instruction but with REG_PC in the Rm field. */
11997 if (!inst.operands[1].isreg)
11998 Rn = REG_PC;
11999 else
12000 {
12001 Rn = inst.operands[1].reg;
12002 reject_bad_reg (Rn);
12003 }
12004
12005 msb = inst.operands[2].imm + inst.operands[3].imm;
12006 constraint (msb > 32, _("bit-field extends past end of register"));
12007 /* The instruction encoding stores the LSB and MSB,
12008 not the LSB and width. */
12009 inst.instruction |= Rd << 8;
12010 inst.instruction |= Rn << 16;
12011 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12012 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12013 inst.instruction |= msb - 1;
12014 }
12015
12016 static void
12017 do_t_bfx (void)
12018 {
12019 unsigned Rd, Rn;
12020
12021 Rd = inst.operands[0].reg;
12022 Rn = inst.operands[1].reg;
12023
12024 reject_bad_reg (Rd);
12025 reject_bad_reg (Rn);
12026
12027 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12028 _("bit-field extends past end of register"));
12029 inst.instruction |= Rd << 8;
12030 inst.instruction |= Rn << 16;
12031 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12032 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12033 inst.instruction |= inst.operands[3].imm - 1;
12034 }
12035
12036 /* ARM V5 Thumb BLX (argument parse)
12037 BLX <target_addr> which is BLX(1)
12038 BLX <Rm> which is BLX(2)
12039 Unfortunately, there are two different opcodes for this mnemonic.
12040 So, the insns[].value is not used, and the code here zaps values
12041 into inst.instruction.
12042
12043 ??? How to take advantage of the additional two bits of displacement
12044 available in Thumb32 mode? Need new relocation? */
12045
12046 static void
12047 do_t_blx (void)
12048 {
12049 set_pred_insn_type_last ();
12050
12051 if (inst.operands[0].isreg)
12052 {
12053 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12054 /* We have a register, so this is BLX(2). */
12055 inst.instruction |= inst.operands[0].reg << 3;
12056 }
12057 else
12058 {
12059 /* No register. This must be BLX(1). */
12060 inst.instruction = 0xf000e800;
12061 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12062 }
12063 }
12064
12065 static void
12066 do_t_branch (void)
12067 {
12068 int opcode;
12069 int cond;
12070 bfd_reloc_code_real_type reloc;
12071
12072 cond = inst.cond;
12073 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
12074
12075 if (in_pred_block ())
12076 {
12077 /* Conditional branches inside IT blocks are encoded as unconditional
12078 branches. */
12079 cond = COND_ALWAYS;
12080 }
12081 else
12082 cond = inst.cond;
12083
12084 if (cond != COND_ALWAYS)
12085 opcode = T_MNEM_bcond;
12086 else
12087 opcode = inst.instruction;
12088
12089 if (unified_syntax
12090 && (inst.size_req == 4
12091 || (inst.size_req != 2
12092 && (inst.operands[0].hasreloc
12093 || inst.relocs[0].exp.X_op == O_constant))))
12094 {
12095 inst.instruction = THUMB_OP32(opcode);
12096 if (cond == COND_ALWAYS)
12097 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
12098 else
12099 {
12100 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
12101 _("selected architecture does not support "
12102 "wide conditional branch instruction"));
12103
12104 gas_assert (cond != 0xF);
12105 inst.instruction |= cond << 22;
12106 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
12107 }
12108 }
12109 else
12110 {
12111 inst.instruction = THUMB_OP16(opcode);
12112 if (cond == COND_ALWAYS)
12113 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
12114 else
12115 {
12116 inst.instruction |= cond << 8;
12117 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
12118 }
12119 /* Allow section relaxation. */
12120 if (unified_syntax && inst.size_req != 2)
12121 inst.relax = opcode;
12122 }
12123 inst.relocs[0].type = reloc;
12124 inst.relocs[0].pc_rel = 1;
12125 }
12126
12127 /* Actually do the work for Thumb state bkpt and hlt. The only difference
12128 between the two is the maximum immediate allowed - which is passed in
12129 RANGE. */
12130 static void
12131 do_t_bkpt_hlt1 (int range)
12132 {
12133 constraint (inst.cond != COND_ALWAYS,
12134 _("instruction is always unconditional"));
12135 if (inst.operands[0].present)
12136 {
12137 constraint (inst.operands[0].imm > range,
12138 _("immediate value out of range"));
12139 inst.instruction |= inst.operands[0].imm;
12140 }
12141
12142 set_pred_insn_type (NEUTRAL_IT_INSN);
12143 }
12144
/* Thumb HLT: the immediate must fit in 6 bits (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
12150
/* Thumb BKPT: the immediate must fit in 8 bits (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
12156
/* Encode Thumb BL/BLX with a 23-bit (BRANCH23) branch reloc.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12184
12185 static void
12186 do_t_bx (void)
12187 {
12188 set_pred_insn_type_last ();
12189 inst.instruction |= inst.operands[0].reg << 3;
12190 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
12191 should cause the alignment to be checked once it is known. This is
12192 because BX PC only works if the instruction is word aligned. */
12193 }
12194
12195 static void
12196 do_t_bxj (void)
12197 {
12198 int Rm;
12199
12200 set_pred_insn_type_last ();
12201 Rm = inst.operands[0].reg;
12202 reject_bad_reg (Rm);
12203 inst.instruction |= Rm << 16;
12204 }
12205
static void
do_t_clz (void)
{
  unsigned Rd;
  unsigned Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Neither SP nor PC is acceptable for Rd or Rm.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The T32 CLZ encoding carries Rm in two places (bits <19:16>
     and <3:0>); write it to both.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12222
12223 /* For the Armv8.1-M conditional instructions. */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* Full four-operand forms: CSINC/CSINV/CSNEG/CSEL Rd, Rn, Rm, cond.  */
    case T_MNEM_csinc:
    case T_MNEM_csinv:
    case T_MNEM_csneg:
    case T_MNEM_csel:
      Rn = inst.operands[1].reg;
      Rm = inst.operands[2].reg;
      cond = inst.operands[3].imm;
      constraint (Rn == REG_SP, BAD_SP);
      constraint (Rm == REG_SP, BAD_SP);
      break;

      /* Two-register aliases: CINC/CINV/CNEG Rd, Rn, cond expand with
	 Rm == Rn and the condition inverted.  */
    case T_MNEM_cinc:
    case T_MNEM_cinv:
    case T_MNEM_cneg:
      Rn = inst.operands[1].reg;
      cond = inst.operands[2].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      constraint (Rn == REG_SP, BAD_SP);
      Rm = Rn;
      break;

      /* Register-free aliases: CSET/CSETM Rd, cond expand with both
	 source registers set to PC and the condition inverted.  */
    case T_MNEM_csetm:
    case T_MNEM_cset:
      cond = inst.operands[1].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      Rn = REG_PC;
      Rm = REG_PC;
      break;

    default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12276
static void
do_t_csdb (void)
{
  /* CSDB has no operands; nothing to encode beyond the opcode
     template.  Mark it as belonging outside a predication block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12282
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* The single immediate operand goes in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
12289
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: salvage the imod bits from the 16-bit template,
	 then rebuild the instruction word from scratch.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* 16-bit form: limited feature checks, and no second operand.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12317
12318 /* THUMB CPY instruction (argument parse). */
12319
static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      /* Explicit wide request: encode as 32-bit MOV.  */
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      /* 16-bit hi-register layout: Rd is split between bit 7 and
	 bits <2:0>; Rm occupies bits <6:3>.  */
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12336
static void
do_t_cbz (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* Only a low register can be tested by this 16-bit encoding.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  /* The branch target is fixed up later via a pc-relative reloc.  */
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
12346
static void
do_t_dbg (void)
{
  /* DBG hint: the option immediate goes in the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
12352
static void
do_t_div (void)
{
  unsigned Rd, Rn, Rm;

  /* When the dividend register is omitted it defaults to Rd.  */
  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  /* SP and PC are not acceptable in any position.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12371
12372 static void
12373 do_t_hint (void)
12374 {
12375 if (unified_syntax && inst.size_req == 4)
12376 inst.instruction = THUMB_OP32 (inst.instruction);
12377 else
12378 inst.instruction = THUMB_OP16 (inst.instruction);
12379 }
12380
/* IT (If-Then): record the predication state for the following block
   and fix up the mask for negated conditions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The lowest set bit in the mask marks the block length; every
	 then/else bit above it must be flipped when the condition's
	 low bit is clear.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12424
12425 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  /* A base register must be supplied whenever memory I/O is done.  */
  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the template distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; it must end any IT block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK as the Rt field: position of the single bit.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12492
/* Encode Thumb LDM/STM, narrowing to a 16-bit form (including
   push/pop or single-register str/ldr) where possible.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms become push/pop, or SP-relative str/ldr
		 for a single register without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
		    THUMB_OP16 (inst.instruction == T_MNEM_stmia
				? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit ldmia/stmia is available.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12621
static void
do_t_ldrex (void)
{
  /* Only the plain pre-indexed [Rn, #imm] addressing form is legal.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset is resolved later via the T32 unsigned 8-bit reloc.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12637
static void
do_t_ldrexd (void)
{
  /* When the second transfer register is omitted it defaults to the
     register after the first one.  */
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
12655
/* Encode a Thumb single load/store, preferring 16-bit encodings (and
   arranging for relaxation) where the operands permit.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch and must end any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Pseudo-op: =const or symbol; may become a literal-pool
	     load or an immediate move.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Leave widening to the relaxation machinery.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (Thumb-1) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- or SP-relative: word only, load only for PC base.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset twins.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12842
static void
do_t_ldstd (void)
{
  /* When the second transfer register is omitted it defaults to the
     register after the first one; r12 and r14 are then rejected as
     the first register.  */
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12865
static void
do_t_ldstt (void)
{
  /* Unprivileged ("T"-variant) load/store: Rt in bits <15:12>, with
     the addressing mode encoded in its T form.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12872
12873 static void
12874 do_t_mla (void)
12875 {
12876 unsigned Rd, Rn, Rm, Ra;
12877
12878 Rd = inst.operands[0].reg;
12879 Rn = inst.operands[1].reg;
12880 Rm = inst.operands[2].reg;
12881 Ra = inst.operands[3].reg;
12882
12883 reject_bad_reg (Rd);
12884 reject_bad_reg (Rn);
12885 reject_bad_reg (Rm);
12886 reject_bad_reg (Ra);
12887
12888 inst.instruction |= Rd << 8;
12889 inst.instruction |= Rn << 16;
12890 inst.instruction |= Rm;
12891 inst.instruction |= Ra << 12;
12892 }
12893
12894 static void
12895 do_t_mlal (void)
12896 {
12897 unsigned RdLo, RdHi, Rn, Rm;
12898
12899 RdLo = inst.operands[0].reg;
12900 RdHi = inst.operands[1].reg;
12901 Rn = inst.operands[2].reg;
12902 Rm = inst.operands[3].reg;
12903
12904 reject_bad_reg (RdLo);
12905 reject_bad_reg (RdHi);
12906 reject_bad_reg (Rn);
12907 reject_bad_reg (Rm);
12908
12909 inst.instruction |= RdLo << 12;
12910 inst.instruction |= RdHi << 8;
12911 inst.instruction |= Rn << 16;
12912 inst.instruction |= Rm;
12913 }
12914
/* Encode MOV/MOVS/CMP with a register, shifted-register or immediate
   second operand, choosing between 16- and 32-bit forms.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing PC is a branch; it must end any IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The ALU_ABS group relocs only fit Thumb-1 encodings.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified (Thumb-1) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13214
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 of the template distinguishes the "top" (MOVT) form.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter the 16-bit immediate into the
	 split imm4/i/imm3/imm8 fields of the encoding.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
13247
/* Shared encoder for MVN/MVNS and the compare/test instructions that
   take a destination-less two-operand form (TST/CMN/CMP path here).  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely forbid PC as Rn; the others reject SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register form.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13327
/* Encode Thumb MRS.  The second operand is either a banked/system
   register (isreg) or an APSR/CPSR/SPSR flag mask immediate.  May be
   diverted to a VFP VMRS by do_vfp_nsyn_mrs.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register operand.  NOTE(review): the 0x200/0xf000
	 validity test and the field split below follow the banked
	 register number layout produced by the parser — confirm
	 against parse_operands if modifying.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13375
/* Encode Thumb MSR.  Operand 0 names the destination special register
   (either as a parsed register or a PSR flag-mask immediate); operand
   1 must be a core register.  May be diverted to a VFP VMSR by
   do_vfp_nsyn_msr.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* Without the DSP extension only the APSR_nzcvq (s/f) masks are
	 available on M-profile.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13422
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination to
   overlap one source; otherwise the 32-bit Thumb-2 MUL is used
   (unified syntax only).  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm == MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
13485
13486 static void
13487 do_t_mull (void)
13488 {
13489 unsigned RdLo, RdHi, Rn, Rm;
13490
13491 RdLo = inst.operands[0].reg;
13492 RdHi = inst.operands[1].reg;
13493 Rn = inst.operands[2].reg;
13494 Rm = inst.operands[3].reg;
13495
13496 reject_bad_reg (RdLo);
13497 reject_bad_reg (RdHi);
13498 reject_bad_reg (Rn);
13499 reject_bad_reg (Rm);
13500
13501 inst.instruction |= RdLo << 12;
13502 inst.instruction |= RdHi << 8;
13503 inst.instruction |= Rn << 16;
13504 inst.instruction |= Rm;
13505
13506 if (RdLo == RdHi)
13507 as_tsktsk (_("rdhi and rdlo must be different"));
13508 }
13509
/* Encode Thumb NOP and NOP-compatible hint instructions (operand 0,
   when present, is the hint number).  */
static void
do_t_nop (void)
{
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to "mov r8, r8" as a NOP on pre-Thumb-2.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
13542
/* Encode Thumb NEG/NEGS (RSB from zero).  Picks the 16-bit form when
   both registers are low and the flag-setting behaviour matches the
   IT-block context.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
13583
/* Encode Thumb-2 ORN (32-bit only): Rd, {Rn,} Rm_or_imm.  When Rn is
   omitted it defaults to Rd.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch to the modified-immediate encoding and
	 let the fixup machinery encode the constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
13617
/* Encode Thumb-2 PKHBT: Rd, Rn, Rm {, LSL #imm}.  The optional shift
   amount is split across bits <14:12> and <7:6>.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.relocs[0].exp.X_add_number;
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
13643
/* Encode Thumb-2 PKHTB by delegating to do_t_pkhbt.  Without a shift,
   PKHTB Rd, Rn, Rm is equivalent to PKHBT Rd, Rm, Rn, so clear the
   tb bit and swap the source operands first.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
13660
/* Encode Thumb-2 PLD/PLDW/PLI.  Operand 0 is the address; the offset
   register, if any, must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
13669
/* Encode Thumb PUSH/POP.  Tries the 16-bit forms first: plain low
   registers, then low registers plus LR (push) or PC (pop); otherwise
   emits the 32-bit LDM/STM form with SP writeback.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* Base register SP (13), with writeback.  */
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13702
/* Encode Armv8.1-M Mainline CLRM (clear multiple registers).  No base
   register, no writeback.  NOTE(review): the error message below
   reuses the push/pop wording even though this is CLRM.  */
static void
do_t_clrm (void)
{
  if (unified_syntax)
    encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13714
/* Encode Armv8.1-M Mainline VSCCLRM (secure context clear of FP
   registers).  Operand 0 holds the first register and, in imm, the
   register count.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S registers: bit 22 is Vd<0>, bits <15:12> are Vd<4:1>.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D registers: bit 22 is Vd<4>, bits <15:12> are Vd<3:0>; bit 8
	 selects the double-precision form and the count is doubled.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13732
13733 static void
13734 do_t_rbit (void)
13735 {
13736 unsigned Rd, Rm;
13737
13738 Rd = inst.operands[0].reg;
13739 Rm = inst.operands[1].reg;
13740
13741 reject_bad_reg (Rd);
13742 reject_bad_reg (Rm);
13743
13744 inst.instruction |= Rd << 8;
13745 inst.instruction |= Rm << 16;
13746 inst.instruction |= Rm;
13747 }
13748
/* Encode Thumb REV/REV16/REVSH: Rd, Rm.  Uses the 16-bit form for low
   registers; the 32-bit form repeats Rm in two fields.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
13777
13778 static void
13779 do_t_rrx (void)
13780 {
13781 unsigned Rd, Rm;
13782
13783 Rd = inst.operands[0].reg;
13784 Rm = inst.operands[1].reg;
13785
13786 reject_bad_reg (Rd);
13787 reject_bad_reg (Rm);
13788
13789 inst.instruction |= Rd << 8;
13790 inst.instruction |= Rm;
13791 }
13792
/* Encode Thumb RSB/RSBS: Rd, {Rs,} #imm_or_Rm.  RSBS Rd, Rs, #0 on
   low registers is narrowed to the 16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13847
/* Encode Thumb SETEND.  Operand 0 is nonzero for BE.  Deprecated from
   ARMv8 onwards; not allowed inside an IT block.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* Bit 3 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
13859
/* Encode Thumb shift instructions (ASR, LSL, LSR, ROR and their
   flag-setting variants), in both register-shift and immediate-shift
   forms.  The two-operand form defaults the source to the
   destination.  Narrow (16-bit) encodings are used where the register
   numbers, shift kind and IT context allow; wide encodings are
   emitted as MOV{S} Rd, Rm, <shift>.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: emit as MOV{S} Rd, Rm, <shift> #n.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14007
14008 static void
14009 do_t_simd (void)
14010 {
14011 unsigned Rd, Rn, Rm;
14012
14013 Rd = inst.operands[0].reg;
14014 Rn = inst.operands[1].reg;
14015 Rm = inst.operands[2].reg;
14016
14017 reject_bad_reg (Rd);
14018 reject_bad_reg (Rn);
14019 reject_bad_reg (Rm);
14020
14021 inst.instruction |= Rd << 8;
14022 inst.instruction |= Rn << 16;
14023 inst.instruction |= Rm;
14024 }
14025
/* Encode a Thumb-2 three-register SIMD-style instruction whose
   assembler operand order is Rd, Rm, Rn — note that operands 1 and 2
   are deliberately read swapped relative to do_t_simd.  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
14043
/* Encode Thumb SMC (secure monitor call) with a 4-bit immediate in
   bits <19:16>.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  /* The immediate is encoded directly; no fixup is needed.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x000f) << 16;

  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
14060
14061 static void
14062 do_t_hvc (void)
14063 {
14064 unsigned int value = inst.relocs[0].exp.X_add_number;
14065
14066 inst.relocs[0].type = BFD_RELOC_UNUSED;
14067 inst.instruction |= (value & 0x0fff);
14068 inst.instruction |= (value & 0xf000) << 4;
14069 }
14070
/* Shared encoder for Thumb-2 SSAT/USAT: Rd, #sat, Rn {, shift #n}.
   BIAS is subtracted from the saturate immediate (1 for SSAT, whose
   range starts at 1; 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount is split across bits <14:12> and <7:6>.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14108
/* Encode Thumb-2 SSAT (saturate immediate is 1-based, hence bias 1).  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14114
14115 static void
14116 do_t_ssat16 (void)
14117 {
14118 unsigned Rd, Rn;
14119
14120 Rd = inst.operands[0].reg;
14121 Rn = inst.operands[2].reg;
14122
14123 reject_bad_reg (Rd);
14124 reject_bad_reg (Rn);
14125
14126 inst.instruction |= Rd << 8;
14127 inst.instruction |= inst.operands[1].imm - 1;
14128 inst.instruction |= Rn << 16;
14129 }
14130
/* Encode Thumb-2 STREX: Rd, Rt, [Rn {, #imm}].  Only a plain
   immediate-offset addressing mode is accepted.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is an 8-bit unsigned quantity, resolved by fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
14147
/* Encode Thumb-2 STREXD: Rd, Rt, {Rt2,} [Rn].  Rt2 defaults to
   Rt + 1; Rd must not overlap any other operand.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
14164
14165 static void
14166 do_t_sxtah (void)
14167 {
14168 unsigned Rd, Rn, Rm;
14169
14170 Rd = inst.operands[0].reg;
14171 Rn = inst.operands[1].reg;
14172 Rm = inst.operands[2].reg;
14173
14174 reject_bad_reg (Rd);
14175 reject_bad_reg (Rn);
14176 reject_bad_reg (Rm);
14177
14178 inst.instruction |= Rd << 8;
14179 inst.instruction |= Rn << 16;
14180 inst.instruction |= Rm;
14181 inst.instruction |= inst.operands[3].imm << 4;
14182 }
14183
/* Encode Thumb SXTH/SXTB/UXTH/UXTB: Rd, Rm {, ROR #n}.  The 16-bit
   form is only available for low registers with no rotation.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation selector in bits <5:4>.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14219
/* Encode Thumb SWI/SVC; the comment number is filled in by fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14225
/* Encode Thumb-2 TBB/TBH: [Rn, Rm] or [Rn, Rm, LSL #1].  Bit 4 of
   the opcode distinguishes the halfword (TBH) form.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Branch-like: must be last in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 relaxed the SP restriction on the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14248
/* Encode Thumb UDF (permanently undefined).  The immediate defaults
   to 0; values above 255 force the 32-bit encoding.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* 16-bit immediate split into imm4 (<19:16>) and imm12 (<11:0>).  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14271
14272
/* Encode Thumb-2 USAT (saturate immediate is 0-based, hence bias 0).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14278
14279 static void
14280 do_t_usat16 (void)
14281 {
14282 unsigned Rd, Rn;
14283
14284 Rd = inst.operands[0].reg;
14285 Rn = inst.operands[2].reg;
14286
14287 reject_bad_reg (Rd);
14288 reject_bad_reg (Rn);
14289
14290 inst.instruction |= Rd << 8;
14291 inst.instruction |= inst.operands[1].imm;
14292 inst.instruction |= Rn << 16;
14293 }
14294
14295 /* Checking the range of the branch offset (VAL) with NBITS bits
14296 and IS_SIGNED signedness. Also checks the LSB to be 0. */
14297 static int
14298 v8_1_branch_value_check (int val, int nbits, int is_signed)
14299 {
14300 gas_assert (nbits > 0 && nbits <= 32);
14301 if (is_signed)
14302 {
14303 int cmp = (1 << (nbits - 1));
14304 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14305 return FAIL;
14306 }
14307 else
14308 {
14309 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14310 return FAIL;
14311 }
14312 return SUCCESS;
14313 }
14314
/* For branches in Armv8.1-M Mainline.  Encodes the BF/BFL/BFCSEL/
   BFX/BFLX "branch future" instructions.  Operand 0 is the branch
   point label; where a value is not a resolved constant, a
   PC-relative relocation is recorded instead.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Branch-point offset: 4 bits at <26:23>.  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* 17-bit offset split into immA:immB:immC fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* 19-bit offset split into immA:immB:immC fields.  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  Either both label operands are resolved or both
	 carry relocations — mixing is rejected.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  /* The else-branch target must be 2 or 4 bytes past the
	     branch point; 4 sets the T bit.  */
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, in bits <21:18>.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: branch target register in bits <19:16>.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
14423
/* Helper function for do_t_loloop to handle relocations.  Encodes a
   12-bit loop-end offset when it is a known constant (negated for LE,
   which branches backwards), otherwise records a LOOP12 reloc.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      int value = inst.relocs[0].exp.X_add_number;
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Offset is split into immh (<11:1>) and imml (bit 11 of the
	 encoding).  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
14449
14450 /* For shifts with four operands in MVE. */
14451 static void
14452 do_mve_scalar_shift1 (void)
14453 {
14454 unsigned int value = inst.operands[2].imm;
14455
14456 inst.instruction |= inst.operands[0].reg << 16;
14457 inst.instruction |= inst.operands[1].reg << 8;
14458
14459 /* Setting the bit for saturation. */
14460 inst.instruction |= ((value == 64) ? 0: 1) << 7;
14461
14462 /* Assuming Rm is already checked not to be 11x1. */
14463 constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14464 constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14465 inst.instruction |= inst.operands[3].reg << 12;
14466 }
14467
14468 /* For shifts in MVE. */
14469 static void
14470 do_mve_scalar_shift (void)
14471 {
14472 if (!inst.operands[2].present)
14473 {
14474 inst.operands[2] = inst.operands[1];
14475 inst.operands[1].reg = 0xf;
14476 }
14477
14478 inst.instruction |= inst.operands[0].reg << 16;
14479 inst.instruction |= inst.operands[1].reg << 8;
14480
14481 if (inst.operands[2].isreg)
14482 {
14483 /* Assuming Rm is already checked not to be 11x1. */
14484 constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
14485 constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
14486 inst.instruction |= inst.operands[2].reg << 12;
14487 }
14488 else
14489 {
14490 /* Assuming imm is already checked as [1,32]. */
14491 unsigned int value = inst.operands[2].imm;
14492 inst.instruction |= (value & 0x1c) << 10;
14493 inst.instruction |= (value & 0x03) << 6;
14494 /* Change last 4 bits from 0xd to 0xf. */
14495 inst.instruction |= 0x2;
14496 }
14497 }
14498
14499 /* MVE instruction encoder helpers. */
14500 #define M_MNEM_vabav 0xee800f01
14501 #define M_MNEM_vmladav 0xeef00e00
14502 #define M_MNEM_vmladava 0xeef00e20
14503 #define M_MNEM_vmladavx 0xeef01e00
14504 #define M_MNEM_vmladavax 0xeef01e20
14505 #define M_MNEM_vmlsdav 0xeef00e01
14506 #define M_MNEM_vmlsdava 0xeef00e21
14507 #define M_MNEM_vmlsdavx 0xeef01e01
14508 #define M_MNEM_vmlsdavax 0xeef01e21
14509 #define M_MNEM_vmullt 0xee011e00
14510 #define M_MNEM_vmullb 0xee010e00
14511 #define M_MNEM_vctp 0xf000e801
14512 #define M_MNEM_vst20 0xfc801e00
14513 #define M_MNEM_vst21 0xfc801e20
14514 #define M_MNEM_vst40 0xfc801e01
14515 #define M_MNEM_vst41 0xfc801e21
14516 #define M_MNEM_vst42 0xfc801e41
14517 #define M_MNEM_vst43 0xfc801e61
14518 #define M_MNEM_vld20 0xfc901e00
14519 #define M_MNEM_vld21 0xfc901e20
14520 #define M_MNEM_vld40 0xfc901e01
14521 #define M_MNEM_vld41 0xfc901e21
14522 #define M_MNEM_vld42 0xfc901e41
14523 #define M_MNEM_vld43 0xfc901e61
14524 #define M_MNEM_vstrb 0xec000e00
14525 #define M_MNEM_vstrh 0xec000e10
14526 #define M_MNEM_vstrw 0xec000e40
14527 #define M_MNEM_vstrd 0xec000e50
14528 #define M_MNEM_vldrb 0xec100e00
14529 #define M_MNEM_vldrh 0xec100e10
14530 #define M_MNEM_vldrw 0xec100e40
14531 #define M_MNEM_vldrd 0xec100e50
14532 #define M_MNEM_vmovlt 0xeea01f40
14533 #define M_MNEM_vmovlb 0xeea00f40
14534 #define M_MNEM_vmovnt 0xfe311e81
14535 #define M_MNEM_vmovnb 0xfe310e81
14536 #define M_MNEM_vadc 0xee300f00
14537 #define M_MNEM_vadci 0xee301f00
14538 #define M_MNEM_vbrsr 0xfe011e60
14539 #define M_MNEM_vaddlv 0xee890f00
14540 #define M_MNEM_vaddlva 0xee890f20
14541 #define M_MNEM_vaddv 0xeef10f00
14542 #define M_MNEM_vaddva 0xeef10f20
14543 #define M_MNEM_vddup 0xee011f6e
14544 #define M_MNEM_vdwdup 0xee011f60
14545 #define M_MNEM_vidup 0xee010f6e
14546 #define M_MNEM_viwdup 0xee010f60
14547 #define M_MNEM_vmaxv 0xeee20f00
14548 #define M_MNEM_vmaxav 0xeee00f00
14549 #define M_MNEM_vminv 0xeee20f80
14550 #define M_MNEM_vminav 0xeee00f80
14551 #define M_MNEM_vmlaldav 0xee800e00
14552 #define M_MNEM_vmlaldava 0xee800e20
14553 #define M_MNEM_vmlaldavx 0xee801e00
14554 #define M_MNEM_vmlaldavax 0xee801e20
14555 #define M_MNEM_vmlsldav 0xee800e01
14556 #define M_MNEM_vmlsldava 0xee800e21
14557 #define M_MNEM_vmlsldavx 0xee801e01
14558 #define M_MNEM_vmlsldavax 0xee801e21
14559 #define M_MNEM_vrmlaldavhx 0xee801f00
14560 #define M_MNEM_vrmlaldavhax 0xee801f20
14561 #define M_MNEM_vrmlsldavh 0xfe800e01
14562 #define M_MNEM_vrmlsldavha 0xfe800e21
14563 #define M_MNEM_vrmlsldavhx 0xfe801e01
14564 #define M_MNEM_vrmlsldavhax 0xfe801e21
14565 #define M_MNEM_vqmovnt 0xee331e01
14566 #define M_MNEM_vqmovnb 0xee330e01
14567 #define M_MNEM_vqmovunt 0xee311e81
14568 #define M_MNEM_vqmovunb 0xee310e81
14569 #define M_MNEM_vshrnt 0xee801fc1
14570 #define M_MNEM_vshrnb 0xee800fc1
14571 #define M_MNEM_vrshrnt 0xfe801fc1
14572 #define M_MNEM_vqshrnt 0xee801f40
14573 #define M_MNEM_vqshrnb 0xee800f40
14574 #define M_MNEM_vqshrunt 0xee801fc0
14575 #define M_MNEM_vqshrunb 0xee800fc0
14576 #define M_MNEM_vrshrnb 0xfe800fc1
14577 #define M_MNEM_vqrshrnt 0xee801f41
14578 #define M_MNEM_vqrshrnb 0xee800f41
14579 #define M_MNEM_vqrshrunt 0xfe801fc0
14580 #define M_MNEM_vqrshrunb 0xfe800fc0
14581
14582 /* Bfloat16 instruction encoder helpers. */
14583 #define B_MNEM_vfmat 0xfc300850
14584 #define B_MNEM_vfmab 0xfc300810
14585
14586 /* Neon instruction encoder helpers. */
14587
14588 /* Encodings for the different types for various Neon opcodes. */
14589
14590 /* An "invalid" code for the following tables. */
14591 #define N_INV -1u
14592
/* One row of NEON_ENC_TAB: the three alternative encodings an overloaded
   Neon mnemonic can select between (N_INV where a variant is absent).  */
struct neon_tab_entry
{
  unsigned integer;		/* Encoding bits for the integer variant.  */
  unsigned float_or_poly;	/* Encoding bits for the float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Encoding bits for the scalar or immediate variant.  */
};
14599
14600 /* Map overloaded Neon opcodes to their respective encodings. */
14601 #define NEON_ENC_TAB \
14602 X(vabd, 0x0000700, 0x1200d00, N_INV), \
14603 X(vabdl, 0x0800700, N_INV, N_INV), \
14604 X(vmax, 0x0000600, 0x0000f00, N_INV), \
14605 X(vmin, 0x0000610, 0x0200f00, N_INV), \
14606 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
14607 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
14608 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
14609 X(vadd, 0x0000800, 0x0000d00, N_INV), \
14610 X(vaddl, 0x0800000, N_INV, N_INV), \
14611 X(vsub, 0x1000800, 0x0200d00, N_INV), \
14612 X(vsubl, 0x0800200, N_INV, N_INV), \
14613 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
14614 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
14615 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
14616 /* Register variants of the following two instructions are encoded as
14617 vcge / vcgt with the operands reversed. */ \
14618 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
14619 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
14620 X(vfma, N_INV, 0x0000c10, N_INV), \
14621 X(vfms, N_INV, 0x0200c10, N_INV), \
14622 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
14623 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
14624 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
14625 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
14626 X(vmlal, 0x0800800, N_INV, 0x0800240), \
14627 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
14628 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
14629 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
14630 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
14631 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
14632 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
14633 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
14634 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
14635 X(vshl, 0x0000400, N_INV, 0x0800510), \
14636 X(vqshl, 0x0000410, N_INV, 0x0800710), \
14637 X(vand, 0x0000110, N_INV, 0x0800030), \
14638 X(vbic, 0x0100110, N_INV, 0x0800030), \
14639 X(veor, 0x1000110, N_INV, N_INV), \
14640 X(vorn, 0x0300110, N_INV, 0x0800010), \
14641 X(vorr, 0x0200110, N_INV, 0x0800010), \
14642 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
14643 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
14644 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
14645 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
14646 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
14647 X(vst1, 0x0000000, 0x0800000, N_INV), \
14648 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
14649 X(vst2, 0x0000100, 0x0800100, N_INV), \
14650 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
14651 X(vst3, 0x0000200, 0x0800200, N_INV), \
14652 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
14653 X(vst4, 0x0000300, 0x0800300, N_INV), \
14654 X(vmovn, 0x1b20200, N_INV, N_INV), \
14655 X(vtrn, 0x1b20080, N_INV, N_INV), \
14656 X(vqmovn, 0x1b20200, N_INV, N_INV), \
14657 X(vqmovun, 0x1b20240, N_INV, N_INV), \
14658 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
14659 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
14660 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
14661 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
14662 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
14663 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
14664 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
14665 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
14666 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
14667 X(vseleq, 0xe000a00, N_INV, N_INV), \
14668 X(vselvs, 0xe100a00, N_INV, N_INV), \
14669 X(vselge, 0xe200a00, N_INV, N_INV), \
14670 X(vselgt, 0xe300a00, N_INV, N_INV), \
14671 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
14672 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
14673 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
14674 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
14675 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
14676 X(aes, 0x3b00300, N_INV, N_INV), \
14677 X(sha3op, 0x2000c00, N_INV, N_INV), \
14678 X(sha1h, 0x3b902c0, N_INV, N_INV), \
14679 X(sha2op, 0x3ba0380, N_INV, N_INV)
14680
14681 enum neon_opc
14682 {
14683 #define X(OPC,I,F,S) N_MNEM_##OPC
14684 NEON_ENC_TAB
14685 #undef X
14686 };
14687
14688 static const struct neon_tab_entry neon_enc_tab[] =
14689 {
14690 #define X(OPC,I,F,S) { (I), (F), (S) }
14691 NEON_ENC_TAB
14692 #undef X
14693 };
14694
14695 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
14696 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14697 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14698 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14699 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14700 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14701 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14702 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14703 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14704 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14705 #define NEON_ENC_SINGLE_(X) \
14706 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
14707 #define NEON_ENC_DOUBLE_(X) \
14708 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
14709 #define NEON_ENC_FPV8_(X) \
14710 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14711
/* Select the TYPE (INTEGER, FLOAT, SCALAR, ...) encoding variant for the
   current mnemonic via the NEON_ENC_<TYPE>_ accessors above, and mark the
   instruction as Neon so check_neon_suffixes accepts a type suffix.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Reject a type suffix (inst.vectype.elems > 0) on an instruction that
   never went through NEON_ENCODE, i.e. is not a Neon instruction.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
14730
14731 /* Define shapes for instruction operands. The following mnemonic characters
14732 are used in this table:
14733
14734 F - VFP S<n> register
14735 D - Neon D<n> register
14736 Q - Neon Q<n> register
14737 I - Immediate
14738 S - Scalar
14739 R - ARM register
14740 L - D<n> register list
14741
14742 This table is used to generate various data:
14743 - enumerations of the form NS_DDR to be used as arguments to
14744 neon_select_shape.
14745 - a table classifying shapes into single, double, quad, mixed.
14746 - a table used to drive neon_select_shape. */
14747
14748 #define NEON_SHAPE_DEF \
14749 X(4, (R, R, Q, Q), QUAD), \
14750 X(4, (Q, R, R, I), QUAD), \
14751 X(4, (R, R, S, S), QUAD), \
14752 X(4, (S, S, R, R), QUAD), \
14753 X(3, (Q, R, I), QUAD), \
14754 X(3, (I, Q, Q), QUAD), \
14755 X(3, (I, Q, R), QUAD), \
14756 X(3, (R, Q, Q), QUAD), \
14757 X(3, (D, D, D), DOUBLE), \
14758 X(3, (Q, Q, Q), QUAD), \
14759 X(3, (D, D, I), DOUBLE), \
14760 X(3, (Q, Q, I), QUAD), \
14761 X(3, (D, D, S), DOUBLE), \
14762 X(3, (Q, Q, S), QUAD), \
14763 X(3, (Q, Q, R), QUAD), \
14764 X(3, (R, R, Q), QUAD), \
14765 X(2, (R, Q), QUAD), \
14766 X(2, (D, D), DOUBLE), \
14767 X(2, (Q, Q), QUAD), \
14768 X(2, (D, S), DOUBLE), \
14769 X(2, (Q, S), QUAD), \
14770 X(2, (D, R), DOUBLE), \
14771 X(2, (Q, R), QUAD), \
14772 X(2, (D, I), DOUBLE), \
14773 X(2, (Q, I), QUAD), \
14774 X(3, (D, L, D), DOUBLE), \
14775 X(2, (D, Q), MIXED), \
14776 X(2, (Q, D), MIXED), \
14777 X(3, (D, Q, I), MIXED), \
14778 X(3, (Q, D, I), MIXED), \
14779 X(3, (Q, D, D), MIXED), \
14780 X(3, (D, Q, Q), MIXED), \
14781 X(3, (Q, Q, D), MIXED), \
14782 X(3, (Q, D, S), MIXED), \
14783 X(3, (D, Q, S), MIXED), \
14784 X(4, (D, D, D, I), DOUBLE), \
14785 X(4, (Q, Q, Q, I), QUAD), \
14786 X(4, (D, D, S, I), DOUBLE), \
14787 X(4, (Q, Q, S, I), QUAD), \
14788 X(2, (F, F), SINGLE), \
14789 X(3, (F, F, F), SINGLE), \
14790 X(2, (F, I), SINGLE), \
14791 X(2, (F, D), MIXED), \
14792 X(2, (D, F), MIXED), \
14793 X(3, (F, F, I), MIXED), \
14794 X(4, (R, R, F, F), SINGLE), \
14795 X(4, (F, F, R, R), SINGLE), \
14796 X(3, (D, R, R), DOUBLE), \
14797 X(3, (R, R, D), DOUBLE), \
14798 X(2, (S, R), SINGLE), \
14799 X(2, (R, S), SINGLE), \
14800 X(2, (F, R), SINGLE), \
14801 X(2, (R, F), SINGLE), \
14802 /* Used for MVE tail predicated loop instructions. */\
14803 X(2, (R, R), QUAD), \
14804 /* Half float shape supported so far. */\
14805 X (2, (H, D), MIXED), \
14806 X (2, (D, H), MIXED), \
14807 X (2, (H, F), MIXED), \
14808 X (2, (F, H), MIXED), \
14809 X (2, (H, H), HALF), \
14810 X (2, (H, R), HALF), \
14811 X (2, (R, H), HALF), \
14812 X (2, (H, I), HALF), \
14813 X (3, (H, H, H), HALF), \
14814 X (3, (H, F, I), MIXED), \
14815 X (3, (F, H, I), MIXED), \
14816 X (3, (D, H, H), MIXED), \
14817 X (3, (D, H, S), MIXED)
14818
14819 #define S2(A,B) NS_##A##B
14820 #define S3(A,B,C) NS_##A##B##C
14821 #define S4(A,B,C,D) NS_##A##B##C##D
14822
14823 #define X(N, L, C) S##N L
14824
14825 enum neon_shape
14826 {
14827 NEON_SHAPE_DEF,
14828 NS_NULL
14829 };
14830
14831 #undef X
14832 #undef S2
14833 #undef S3
14834 #undef S4
14835
14836 enum neon_shape_class
14837 {
14838 SC_HALF,
14839 SC_SINGLE,
14840 SC_DOUBLE,
14841 SC_QUAD,
14842 SC_MIXED
14843 };
14844
14845 #define X(N, L, C) SC_##C
14846
14847 static enum neon_shape_class neon_shape_class[] =
14848 {
14849 NEON_SHAPE_DEF
14850 };
14851
14852 #undef X
14853
14854 enum neon_shape_el
14855 {
14856 SE_H,
14857 SE_F,
14858 SE_D,
14859 SE_Q,
14860 SE_I,
14861 SE_S,
14862 SE_R,
14863 SE_L
14864 };
14865
14866 /* Register widths of above. */
14867 static unsigned neon_shape_el_size[] =
14868 {
14869 16,
14870 32,
14871 64,
14872 128,
14873 0,
14874 32,
14875 32,
14876 0
14877 };
14878
14879 struct neon_shape_info
14880 {
14881 unsigned els;
14882 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
14883 };
14884
14885 #define S2(A,B) { SE_##A, SE_##B }
14886 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14887 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14888
14889 #define X(N, L, C) { N, S##N L }
14890
14891 static struct neon_shape_info neon_shape_tab[] =
14892 {
14893 NEON_SHAPE_DEF
14894 };
14895
14896 #undef X
14897 #undef S2
14898 #undef S3
14899 #undef S4
14900
14901 /* Bit masks used in type checking given instructions.
14902 'N_EQK' means the type must be the same as (or based on in some way) the key
14903 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14904 set, various other bits can be set as well in order to modify the meaning of
14905 the type constraint. */
14906
14907 enum neon_type_mask
14908 {
14909 N_S8 = 0x0000001,
14910 N_S16 = 0x0000002,
14911 N_S32 = 0x0000004,
14912 N_S64 = 0x0000008,
14913 N_U8 = 0x0000010,
14914 N_U16 = 0x0000020,
14915 N_U32 = 0x0000040,
14916 N_U64 = 0x0000080,
14917 N_I8 = 0x0000100,
14918 N_I16 = 0x0000200,
14919 N_I32 = 0x0000400,
14920 N_I64 = 0x0000800,
14921 N_8 = 0x0001000,
14922 N_16 = 0x0002000,
14923 N_32 = 0x0004000,
14924 N_64 = 0x0008000,
14925 N_P8 = 0x0010000,
14926 N_P16 = 0x0020000,
14927 N_F16 = 0x0040000,
14928 N_F32 = 0x0080000,
14929 N_F64 = 0x0100000,
14930 N_P64 = 0x0200000,
14931 N_BF16 = 0x0400000,
14932 N_KEY = 0x1000000, /* Key element (main type specifier). */
14933 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
14934 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
14935 N_UNT = 0x8000000, /* Must be explicitly untyped. */
14936 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
14937 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
14938 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14939 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14940 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14941 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
14942 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14943 N_UTYP = 0,
14944 N_MAX_NONSPECIAL = N_P64
14945 };
14946
14947 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14948
14949 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14950 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14951 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14952 #define N_S_32 (N_S8 | N_S16 | N_S32)
14953 #define N_F_16_32 (N_F16 | N_F32)
14954 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14955 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14956 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14957 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14958 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14959 #define N_F_MVE (N_F16 | N_F32)
14960 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14961
14962 /* Pass this as the first type argument to neon_check_type to ignore types
14963 altogether. */
14964 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14965
14966 /* Select a "shape" for the current instruction (describing register types or
14967 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14968 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14969 function of operand parsing, so this function doesn't need to be called.
14970 Shapes should be listed in order of decreasing length. */
14971
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape from the NS_NULL-terminated vararg list in
     turn; the first one whose element kinds match every present operand
     wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A shape needing more operands than were parsed cannot match.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: a single-precision register operand whose
		 16-bit type came from any of the three places described
		 above.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* VFP single precision: an S register with a 32-bit type, or
		 no type at all.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register: vector, neither quad nor single.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar operand.  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not validated here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15108
15109 /* True if SHAPE is predominantly a quadword operation (most of the time, this
15110 means the Q bit should be set). */
15111
15112 static int
15113 neon_quad (enum neon_shape shape)
15114 {
15115 return neon_shape_class[shape] == SC_QUAD;
15116 }
15117
15118 static void
15119 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15120 unsigned *g_size)
15121 {
15122 /* Allow modification to be made to types which are constrained to be
15123 based on the key element, based on bits set alongside N_EQK. */
15124 if ((typebits & N_EQK) != 0)
15125 {
15126 if ((typebits & N_HLF) != 0)
15127 *g_size /= 2;
15128 else if ((typebits & N_DBL) != 0)
15129 *g_size *= 2;
15130 if ((typebits & N_SGN) != 0)
15131 *g_type = NT_signed;
15132 else if ((typebits & N_UNS) != 0)
15133 *g_type = NT_unsigned;
15134 else if ((typebits & N_INT) != 0)
15135 *g_type = NT_integer;
15136 else if ((typebits & N_FLT) != 0)
15137 *g_type = NT_float;
15138 else if ((typebits & N_SIZ) != 0)
15139 *g_type = NT_untyped;
15140 }
15141 }
15142
15143 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15144 operand type, i.e. the single type specified in a Neon instruction when it
15145 is the only one given. */
15146
15147 static struct neon_type_el
15148 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15149 {
15150 struct neon_type_el dest = *key;
15151
15152 gas_assert ((thisarg & N_EQK) != 0);
15153
15154 neon_modify_type_size (thisarg, &dest.type, &dest.size);
15155
15156 return dest;
15157 }
15158
15159 /* Convert Neon type and size into compact bitmask representation. */
15160
15161 static enum neon_type_mask
15162 type_chk_of_el_type (enum neon_el_type type, unsigned size)
15163 {
15164 switch (type)
15165 {
15166 case NT_untyped:
15167 switch (size)
15168 {
15169 case 8: return N_8;
15170 case 16: return N_16;
15171 case 32: return N_32;
15172 case 64: return N_64;
15173 default: ;
15174 }
15175 break;
15176
15177 case NT_integer:
15178 switch (size)
15179 {
15180 case 8: return N_I8;
15181 case 16: return N_I16;
15182 case 32: return N_I32;
15183 case 64: return N_I64;
15184 default: ;
15185 }
15186 break;
15187
15188 case NT_float:
15189 switch (size)
15190 {
15191 case 16: return N_F16;
15192 case 32: return N_F32;
15193 case 64: return N_F64;
15194 default: ;
15195 }
15196 break;
15197
15198 case NT_poly:
15199 switch (size)
15200 {
15201 case 8: return N_P8;
15202 case 16: return N_P16;
15203 case 64: return N_P64;
15204 default: ;
15205 }
15206 break;
15207
15208 case NT_signed:
15209 switch (size)
15210 {
15211 case 8: return N_S8;
15212 case 16: return N_S16;
15213 case 32: return N_S32;
15214 case 64: return N_S64;
15215 default: ;
15216 }
15217 break;
15218
15219 case NT_unsigned:
15220 switch (size)
15221 {
15222 case 8: return N_U8;
15223 case 16: return N_U16;
15224 case 32: return N_U32;
15225 case 64: return N_U64;
15226 default: ;
15227 }
15228 break;
15229
15230 case NT_bfloat:
15231 if (size == 16) return N_BF16;
15232 break;
15233
15234 default: ;
15235 }
15236
15237 return N_UTYP;
15238 }
15239
15240 /* Convert compact Neon bitmask type representation to a type and size. Only
15241 handles the case where a single bit is set in the mask. */
15242
15243 static int
15244 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15245 enum neon_type_mask mask)
15246 {
15247 if ((mask & N_EQK) != 0)
15248 return FAIL;
15249
15250 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15251 *size = 8;
15252 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15253 != 0)
15254 *size = 16;
15255 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15256 *size = 32;
15257 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15258 *size = 64;
15259 else
15260 return FAIL;
15261
15262 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15263 *type = NT_signed;
15264 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15265 *type = NT_unsigned;
15266 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15267 *type = NT_integer;
15268 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15269 *type = NT_untyped;
15270 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15271 *type = NT_poly;
15272 else if ((mask & (N_F_ALL)) != 0)
15273 *type = NT_float;
15274 else if ((mask & (N_BF16)) != 0)
15275 *type = NT_bfloat;
15276 else
15277 return FAIL;
15278
15279 return SUCCESS;
15280 }
15281
15282 /* Modify a bitmask of allowed types. This is only needed for type
15283 relaxation. */
15284
15285 static unsigned
15286 modify_types_allowed (unsigned allowed, unsigned mods)
15287 {
15288 unsigned size;
15289 enum neon_el_type type;
15290 unsigned destmask;
15291 int i;
15292
15293 destmask = 0;
15294
15295 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15296 {
15297 if (el_type_of_type_chk (&type, &size,
15298 (enum neon_type_mask) (allowed & i)) == SUCCESS)
15299 {
15300 neon_modify_type_size (mods, &type, &size);
15301 destmask |= type_chk_of_el_type (type, size);
15302 }
15303 }
15304
15305 return destmask;
15306 }
15307
15308 /* Check type and return type classification.
15309 The manual states (paraphrase): If one datatype is given, it indicates the
15310 type given in:
15311 - the second operand, if there is one
15312 - the operand, if there is no second operand
15313 - the result, if there are no operands.
15314 This isn't quite good enough though, so we use a concept of a "key" datatype
15315 which is set on a per-instruction basis, which is the one which matters when
15316 only one data type is written.
15317 Note: this function has side-effects (e.g. filling in missing operands). All
15318 Neon instructions should call it before performing bit encoding. */
15319
15320 static struct neon_type_el
15321 neon_check_type (unsigned els, enum neon_shape ns, ...)
15322 {
15323 va_list ap;
15324 unsigned i, pass, key_el = 0;
15325 unsigned types[NEON_MAX_TYPE_ELS];
15326 enum neon_el_type k_type = NT_invtype;
15327 unsigned k_size = -1u;
15328 struct neon_type_el badtype = {NT_invtype, -1};
15329 unsigned key_allowed = 0;
15330
15331 /* Optional registers in Neon instructions are always (not) in operand 1.
15332 Fill in the missing operand here, if it was omitted. */
15333 if (els > 1 && !inst.operands[1].present)
15334 inst.operands[1] = inst.operands[0];
15335
15336 /* Suck up all the varargs. */
15337 va_start (ap, ns);
15338 for (i = 0; i < els; i++)
15339 {
15340 unsigned thisarg = va_arg (ap, unsigned);
15341 if (thisarg == N_IGNORE_TYPE)
15342 {
15343 va_end (ap);
15344 return badtype;
15345 }
15346 types[i] = thisarg;
15347 if ((thisarg & N_KEY) != 0)
15348 key_el = i;
15349 }
15350 va_end (ap);
15351
15352 if (inst.vectype.elems > 0)
15353 for (i = 0; i < els; i++)
15354 if (inst.operands[i].vectype.type != NT_invtype)
15355 {
15356 first_error (_("types specified in both the mnemonic and operands"));
15357 return badtype;
15358 }
15359
15360 /* Duplicate inst.vectype elements here as necessary.
15361 FIXME: No idea if this is exactly the same as the ARM assembler,
15362 particularly when an insn takes one register and one non-register
15363 operand. */
15364 if (inst.vectype.elems == 1 && els > 1)
15365 {
15366 unsigned j;
15367 inst.vectype.elems = els;
15368 inst.vectype.el[key_el] = inst.vectype.el[0];
15369 for (j = 0; j < els; j++)
15370 if (j != key_el)
15371 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
15372 types[j]);
15373 }
15374 else if (inst.vectype.elems == 0 && els > 0)
15375 {
15376 unsigned j;
15377 /* No types were given after the mnemonic, so look for types specified
15378 after each operand. We allow some flexibility here; as long as the
15379 "key" operand has a type, we can infer the others. */
15380 for (j = 0; j < els; j++)
15381 if (inst.operands[j].vectype.type != NT_invtype)
15382 inst.vectype.el[j] = inst.operands[j].vectype;
15383
15384 if (inst.operands[key_el].vectype.type != NT_invtype)
15385 {
15386 for (j = 0; j < els; j++)
15387 if (inst.operands[j].vectype.type == NT_invtype)
15388 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
15389 types[j]);
15390 }
15391 else
15392 {
15393 first_error (_("operand types can't be inferred"));
15394 return badtype;
15395 }
15396 }
15397 else if (inst.vectype.elems != els)
15398 {
15399 first_error (_("type specifier has the wrong number of parts"));
15400 return badtype;
15401 }
15402
15403 for (pass = 0; pass < 2; pass++)
15404 {
15405 for (i = 0; i < els; i++)
15406 {
15407 unsigned thisarg = types[i];
15408 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
15409 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
15410 enum neon_el_type g_type = inst.vectype.el[i].type;
15411 unsigned g_size = inst.vectype.el[i].size;
15412
15413 /* Decay more-specific signed & unsigned types to sign-insensitive
15414 integer types if sign-specific variants are unavailable. */
15415 if ((g_type == NT_signed || g_type == NT_unsigned)
15416 && (types_allowed & N_SU_ALL) == 0)
15417 g_type = NT_integer;
15418
15419 /* If only untyped args are allowed, decay any more specific types to
15420 them. Some instructions only care about signs for some element
15421 sizes, so handle that properly. */
15422 if (((types_allowed & N_UNT) == 0)
15423 && ((g_size == 8 && (types_allowed & N_8) != 0)
15424 || (g_size == 16 && (types_allowed & N_16) != 0)
15425 || (g_size == 32 && (types_allowed & N_32) != 0)
15426 || (g_size == 64 && (types_allowed & N_64) != 0)))
15427 g_type = NT_untyped;
15428
15429 if (pass == 0)
15430 {
15431 if ((thisarg & N_KEY) != 0)
15432 {
15433 k_type = g_type;
15434 k_size = g_size;
15435 key_allowed = thisarg & ~N_KEY;
15436
15437 /* Check architecture constraint on FP16 extension. */
15438 if (k_size == 16
15439 && k_type == NT_float
15440 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
15441 {
15442 inst.error = _(BAD_FP16);
15443 return badtype;
15444 }
15445 }
15446 }
15447 else
15448 {
15449 if ((thisarg & N_VFP) != 0)
15450 {
15451 enum neon_shape_el regshape;
15452 unsigned regwidth, match;
15453
15454 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
15455 if (ns == NS_NULL)
15456 {
15457 first_error (_("invalid instruction shape"));
15458 return badtype;
15459 }
15460 regshape = neon_shape_tab[ns].el[i];
15461 regwidth = neon_shape_el_size[regshape];
15462
15463 /* In VFP mode, operands must match register widths. If we
15464 have a key operand, use its width, else use the width of
15465 the current operand. */
15466 if (k_size != -1u)
15467 match = k_size;
15468 else
15469 match = g_size;
15470
15471 /* FP16 will use a single precision register. */
15472 if (regwidth == 32 && match == 16)
15473 {
15474 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
15475 match = regwidth;
15476 else
15477 {
15478 inst.error = _(BAD_FP16);
15479 return badtype;
15480 }
15481 }
15482
15483 if (regwidth != match)
15484 {
15485 first_error (_("operand size must match register width"));
15486 return badtype;
15487 }
15488 }
15489
15490 if ((thisarg & N_EQK) == 0)
15491 {
15492 unsigned given_type = type_chk_of_el_type (g_type, g_size);
15493
15494 if ((given_type & types_allowed) == 0)
15495 {
15496 first_error (BAD_SIMD_TYPE);
15497 return badtype;
15498 }
15499 }
15500 else
15501 {
15502 enum neon_el_type mod_k_type = k_type;
15503 unsigned mod_k_size = k_size;
15504 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
15505 if (g_type != mod_k_type || g_size != mod_k_size)
15506 {
15507 first_error (_("inconsistent types in Neon instruction"));
15508 return badtype;
15509 }
15510 }
15511 }
15512 }
15513 }
15514
15515 return inst.vectype.el[key_el];
15516 }
15517
15518 /* Neon-style VFP instruction forwarding. */
15519
15520 /* Thumb VFP instructions have 0xE in the condition field. */
15521
15522 static void
15523 do_vfp_cond_or_thumb (void)
15524 {
15525 inst.is_neon = 1;
15526
15527 if (thumb_mode)
15528 inst.instruction |= 0xe0000000;
15529 else
15530 inst.instruction |= inst.cond << 28;
15531 }
15532
15533 /* Look up and encode a simple mnemonic, for use as a helper function for the
15534 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
15535 etc. It is assumed that operand parsing has already been done, and that the
15536 operands are in the form expected by the given opcode (this isn't necessarily
15537 the same as the form in which they were parsed, hence some massaging must
15538 take place before this function is called).
15539 Checks current arch version against that in the looked-up opcode. */
15540
static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* Look the helper mnemonic up in the main opcode hash table.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  /* OPNAME is supplied by the assembler itself, never by the user, so
     a failed lookup is an internal error.  */
  if (!opcode)
    abort ();

  /* Reject the instruction if the selected CPU/FPU lacks the feature
     set required by the looked-up opcode in the current mode.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  /* Mark as Neon-syntax so neon-style type suffixes are accepted.  */
  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition in the top four bits.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
15568
15569 static void
15570 do_vfp_nsyn_add_sub (enum neon_shape rs)
15571 {
15572 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15573
15574 if (rs == NS_FFF || rs == NS_HHH)
15575 {
15576 if (is_add)
15577 do_vfp_nsyn_opcode ("fadds");
15578 else
15579 do_vfp_nsyn_opcode ("fsubs");
15580
15581 /* ARMv8.2 fp16 instruction. */
15582 if (rs == NS_HHH)
15583 do_scalar_fp16_v82_encode ();
15584 }
15585 else
15586 {
15587 if (is_add)
15588 do_vfp_nsyn_opcode ("faddd");
15589 else
15590 do_vfp_nsyn_opcode ("fsubd");
15591 }
15592 }
15593
15594 /* Check operand types to see if this is a VFP instruction, and if so call
15595 PFN (). */
15596
15597 static int
15598 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15599 {
15600 enum neon_shape rs;
15601 struct neon_type_el et;
15602
15603 switch (args)
15604 {
15605 case 2:
15606 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15607 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15608 break;
15609
15610 case 3:
15611 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15612 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15613 N_F_ALL | N_KEY | N_VFP);
15614 break;
15615
15616 default:
15617 abort ();
15618 }
15619
15620 if (et.type != NT_invtype)
15621 {
15622 pfn (rs);
15623 return SUCCESS;
15624 }
15625
15626 inst.error = NULL;
15627 return FAIL;
15628 }
15629
15630 static void
15631 do_vfp_nsyn_mla_mls (enum neon_shape rs)
15632 {
15633 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15634
15635 if (rs == NS_FFF || rs == NS_HHH)
15636 {
15637 if (is_mla)
15638 do_vfp_nsyn_opcode ("fmacs");
15639 else
15640 do_vfp_nsyn_opcode ("fnmacs");
15641
15642 /* ARMv8.2 fp16 instruction. */
15643 if (rs == NS_HHH)
15644 do_scalar_fp16_v82_encode ();
15645 }
15646 else
15647 {
15648 if (is_mla)
15649 do_vfp_nsyn_opcode ("fmacd");
15650 else
15651 do_vfp_nsyn_opcode ("fnmacd");
15652 }
15653 }
15654
15655 static void
15656 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15657 {
15658 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15659
15660 if (rs == NS_FFF || rs == NS_HHH)
15661 {
15662 if (is_fma)
15663 do_vfp_nsyn_opcode ("ffmas");
15664 else
15665 do_vfp_nsyn_opcode ("ffnmas");
15666
15667 /* ARMv8.2 fp16 instruction. */
15668 if (rs == NS_HHH)
15669 do_scalar_fp16_v82_encode ();
15670 }
15671 else
15672 {
15673 if (is_fma)
15674 do_vfp_nsyn_opcode ("ffmad");
15675 else
15676 do_vfp_nsyn_opcode ("ffnmad");
15677 }
15678 }
15679
15680 static void
15681 do_vfp_nsyn_mul (enum neon_shape rs)
15682 {
15683 if (rs == NS_FFF || rs == NS_HHH)
15684 {
15685 do_vfp_nsyn_opcode ("fmuls");
15686
15687 /* ARMv8.2 fp16 instruction. */
15688 if (rs == NS_HHH)
15689 do_scalar_fp16_v82_encode ();
15690 }
15691 else
15692 do_vfp_nsyn_opcode ("fmuld");
15693 }
15694
15695 static void
15696 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15697 {
15698 int is_neg = (inst.instruction & 0x80) != 0;
15699 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15700
15701 if (rs == NS_FF || rs == NS_HH)
15702 {
15703 if (is_neg)
15704 do_vfp_nsyn_opcode ("fnegs");
15705 else
15706 do_vfp_nsyn_opcode ("fabss");
15707
15708 /* ARMv8.2 fp16 instruction. */
15709 if (rs == NS_HH)
15710 do_scalar_fp16_v82_encode ();
15711 }
15712 else
15713 {
15714 if (is_neg)
15715 do_vfp_nsyn_opcode ("fnegd");
15716 else
15717 do_vfp_nsyn_opcode ("fabsd");
15718 }
15719 }
15720
15721 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15722 insns belong to Neon, and are handled elsewhere. */
15723
15724 static void
15725 do_vfp_nsyn_ldm_stm (int is_dbmode)
15726 {
15727 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15728 if (is_ldm)
15729 {
15730 if (is_dbmode)
15731 do_vfp_nsyn_opcode ("fldmdbs");
15732 else
15733 do_vfp_nsyn_opcode ("fldmias");
15734 }
15735 else
15736 {
15737 if (is_dbmode)
15738 do_vfp_nsyn_opcode ("fstmdbs");
15739 else
15740 do_vfp_nsyn_opcode ("fstmias");
15741 }
15742 }
15743
15744 static void
15745 do_vfp_nsyn_sqrt (void)
15746 {
15747 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15748 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15749
15750 if (rs == NS_FF || rs == NS_HH)
15751 {
15752 do_vfp_nsyn_opcode ("fsqrts");
15753
15754 /* ARMv8.2 fp16 instruction. */
15755 if (rs == NS_HH)
15756 do_scalar_fp16_v82_encode ();
15757 }
15758 else
15759 do_vfp_nsyn_opcode ("fsqrtd");
15760 }
15761
15762 static void
15763 do_vfp_nsyn_div (void)
15764 {
15765 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15766 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15767 N_F_ALL | N_KEY | N_VFP);
15768
15769 if (rs == NS_FFF || rs == NS_HHH)
15770 {
15771 do_vfp_nsyn_opcode ("fdivs");
15772
15773 /* ARMv8.2 fp16 instruction. */
15774 if (rs == NS_HHH)
15775 do_scalar_fp16_v82_encode ();
15776 }
15777 else
15778 do_vfp_nsyn_opcode ("fdivd");
15779 }
15780
/* Encode the Neon-syntax negated-multiply family (see do_vfp_nsyn_*
   callers): pick the single- or double-precision dyadic encoding from
   the operand shape, then fill in the condition field.  */

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F_ALL | N_KEY | N_VFP);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();

}
15805
15806 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15807 (0, 1, 2, 3). */
15808
static unsigned
neon_logbits (unsigned x)
{
  /* X is expected to be a power of two in {8, 16, 32, 64}: ffs gives
     the 1-based index of the lowest set bit, so subtracting 4 maps
     8->0, 16->1, 32->2, 64->3.  Callers guarantee a valid size; the
     result for x == 0 (ffs returns 0) would be meaningless here.  */
  return ffs (x) - 4;
}

/* Low four bits of an (extended) register number.  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of an extended register number (the D/N/M "high" bit).  */
#define HI1(R) (((R) >> 4) & 1)
15817
/* Map the condition parsed for a VCMP/VPT (held in
   inst.operands[0].imm as an ARM condition-code value) onto the 3-bit
   MVE condition encoding, validating it against the element type ET.
   Calls first_error and returns 0 when the condition is not valid for
   the given element type.  */

static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
{
  switch (et.type)
    {
    default:
      first_error (BAD_EL_TYPE);
      return 0;
    case NT_float:
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0x0:
	  /* eq.  */
	  return 0;
	case 0x1:
	  /* ne.  */
	  return 1;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    case NT_integer:
      /* only accept eq and ne.  */
      if (inst.operands[0].imm > 1)
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
      return inst.operands[0].imm;
    case NT_unsigned:
      if (inst.operands[0].imm == 0x2)
	/* cs/hs.  */
	return 2;
      else if (inst.operands[0].imm == 0x8)
	/* hi.  */
	return 3;
      else
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
    case NT_signed:
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    }
  /* Should be unreachable.  */
  abort ();
}
15892
15893 /* For VCTP (create vector tail predicate) in MVE. */
static void
do_mve_vctp (void)
{
  int dt = 0;
  unsigned size = 0x0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* This is a typical MVE instruction which has no type but have size 8, 16,
     32 and 64.  For instructions with no type, inst.vectype.el[j].type is set
     to NT_untyped and size is updated in inst.vectype.el[j].size.  */
  if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
    dt = inst.vectype.el[0].size;

  /* Setting this does not indicate an actual NEON instruction, but only
     indicates that the mnemonic accepts neon-style type suffixes.  */
  inst.is_neon = 1;

  /* Map the element size onto the 2-bit size field; 8 keeps 0x0.  */
  switch (dt)
    {
    case 8:
      break;
    case 16:
      size = 0x1; break;
    case 32:
      size = 0x2; break;
    case 64:
      size = 0x3; break;
    default:
      /* On a bad type we still fall through and encode with size 0;
	 first_error has already recorded the diagnostic.  */
      first_error (_("Type is not allowed for this instruction"));
    }
  inst.instruction |= size << 20;
  inst.instruction |= inst.operands[0].reg << 16;
}
15931
/* Encode a VPT instruction and initialise the vector-predication block
   state tracked in now_pred.  */

static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  if (inst.operands[0].present)
    {
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  /* Float comparisons need the MVE FP extension and are only
	     defined for f16/f32 elements.  */
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      /* The second source may be a Q register or a GPR; the fcond bits
	 are scattered differently for the two forms.  */
      if (inst.operands[2].isquad)
	{
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  /* SP as the scalar source only draws a (non-fatal) warning.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  /* Seed the predication mask from bit 22 and bits <15:13> of the
     encoded instruction.  */
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		  | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = FALSE;
  now_pred.type = VECTOR_PRED;
  inst.is_neon = 1;
}
15993
/* Encode an MVE VCMP (vector compare against a Q register or GPR).  */

static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* PC is only allowed as the scalar source when written as ZR.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  /* Start from the base VCMP encoding and OR in the fields.  */
  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      /* Float comparisons need the MVE FP extension.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  /* Q-register and GPR second sources scatter fcond bit 1 into
     different positions.  */
  if (inst.operands[2].isquad)
    {
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      /* SP only draws a (non-fatal) warning.  */
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16061
16062 static void
16063 do_mve_vmaxa_vmina (void)
16064 {
16065 if (inst.cond > COND_ALWAYS)
16066 inst.pred_insn_type = INSIDE_VPT_INSN;
16067 else
16068 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16069
16070 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16071 struct neon_type_el et
16072 = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16073
16074 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16075 inst.instruction |= neon_logbits (et.size) << 18;
16076 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16077 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16078 inst.instruction |= LOW4 (inst.operands[1].reg);
16079 inst.is_neon = 1;
16080 }
16081
/* Encode MVE VFMAS (Qda, Qn, Rm: float multiply-accumulate with a
   general-purpose scalar operand).  */

static void
do_mve_vfmas (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC as the scalar operand only draw (non-fatal) warnings.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Bit 28 selects f16 (1) over f32 (0).  */
  inst.instruction |= (et.size == 16) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16107
/* Encode MVE VIDUP/VDDUP and the wrapping VIWDUP/VDWDUP
   (vector increment/decrement and duplicate) instructions.  */

static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The step immediate is restricted to 1, 2, 4 or 8.  */
  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping forms: no wrap register; the Rm field is
	 hard-wired to 7 (0b111).  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping forms take an odd-numbered wrap register, encoded
	 divided by two.  SP only warns; PC is rejected.  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* The step is encoded across bit 7 (set for 4/8) and bit 0 (set for
     2/8).  */
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16150
/* Encode MVE VMLAS (Qda, Qn, Rm: multiply-accumulate with a
   general-purpose scalar operand).  */

static void
do_mve_vmlas (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* PC/SP as the scalar operand only draw (non-fatal) warnings.  */
  if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Bit 28 is the U (unsigned) bit.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16177
/* Encode MVE VSHLL (vector shift left long, widening 8->16 or
   16->32 bits) with an immediate shift amount.  */

static void
do_mve_vshll (void)
{
  struct neon_type_el et
    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The shift must be between 1 and the source element size.  */
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate value out of range"));

  if ((unsigned)imm == et.size)
    {
      /* A shift equal to the element width uses a distinct encoding:
	 the size goes in bits <19:18> instead of being folded into an
	 immediate field.  */
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= 0x110001;
    }
  else
    {
      /* Otherwise bits <21:16> hold et.size + imm.  */
      inst.instruction |= (et.size + imm) << 16;
      inst.instruction |= 0x800140;
    }

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16211
/* Encode MVE VSHLC (whole-vector shift left with a general-purpose
   register operand).  */

static void
do_mve_vshlc (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* PC/SP as the GPR operand only draw (non-fatal) warnings.  */
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);

  int imm = inst.operands[2].imm;
  constraint (imm < 1 || imm > 32, _("immediate value out of range"));

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* A shift of 32 wraps to 0 in the 5-bit immediate field.  */
  inst.instruction |= (imm & 0x1f) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.is_neon = 1;
}
16234
/* Encode the MVE narrowing shift-right family: VSHRN, VRSHRN, VQSHRN,
   VQRSHRN, VQSHRUN, VQRSHRUN (each with top/bottom variants).  */

static void
do_mve_vshrn (void)
{
  unsigned types;
  /* The set of accepted element types depends on the mnemonic.  */
  switch (inst.instruction)
    {
    case M_MNEM_vshrnt:
    case M_MNEM_vshrnb:
    case M_MNEM_vrshrnt:
    case M_MNEM_vrshrnb:
      types = N_I16 | N_I32;
      break;
    case M_MNEM_vqshrnt:
    case M_MNEM_vqshrnb:
    case M_MNEM_vqrshrnt:
    case M_MNEM_vqrshrnb:
      types = N_U16 | N_U32 | N_S16 | N_S32;
      break;
    case M_MNEM_vqshrunt:
    case M_MNEM_vqshrunb:
    case M_MNEM_vqrshrunt:
    case M_MNEM_vqrshrunb:
      types = N_S16 | N_S32;
      break;
    default:
      abort ();
    }

  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned Qd = inst.operands[0].reg;
  unsigned Qm = inst.operands[1].reg;
  unsigned imm = inst.operands[2].imm;
  /* The shift must be in [1, source-element-size / 2].  */
  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
	      et.size == 16
	      ? _("immediate operand expected in the range [1,8]")
	      : _("immediate operand expected in the range [1,16]"));

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (Qd) << 22;
  /* The shift amount is encoded as et.size - imm.  */
  inst.instruction |= (et.size - imm) << 16;
  inst.instruction |= LOW4 (Qd) << 12;
  inst.instruction |= HI1 (Qm) << 5;
  inst.instruction |= LOW4 (Qm);
  inst.is_neon = 1;
}
16286
/* Encode MVE VQMOVN-family saturating-narrow instructions
   (top/bottom variants).  */

static void
do_mve_vqmovn (void)
{
  struct neon_type_el et;
  /* VQMOVNT/VQMOVNB accept signed and unsigned sources; the remaining
     (unsigned-saturating) forms accept only signed ones.  */
  if (inst.instruction == M_MNEM_vqmovnt
      || inst.instruction == M_MNEM_vqmovnb)
    et = neon_check_type (2, NS_QQ, N_EQK,
			  N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
  else
    et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (et.size == 32) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16311
16312 static void
16313 do_mve_vpsel (void)
16314 {
16315 neon_select_shape (NS_QQQ, NS_NULL);
16316
16317 if (inst.cond > COND_ALWAYS)
16318 inst.pred_insn_type = INSIDE_VPT_INSN;
16319 else
16320 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16321
16322 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16323 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16324 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16325 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16326 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16327 inst.instruction |= LOW4 (inst.operands[2].reg);
16328 inst.is_neon = 1;
16329 }
16330
16331 static void
16332 do_mve_vpnot (void)
16333 {
16334 if (inst.cond > COND_ALWAYS)
16335 inst.pred_insn_type = INSIDE_VPT_INSN;
16336 else
16337 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16338 }
16339
16340 static void
16341 do_mve_vmaxnma_vminnma (void)
16342 {
16343 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16344 struct neon_type_el et
16345 = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16346
16347 if (inst.cond > COND_ALWAYS)
16348 inst.pred_insn_type = INSIDE_VPT_INSN;
16349 else
16350 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16351
16352 inst.instruction |= (et.size == 16) << 28;
16353 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16354 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16355 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16356 inst.instruction |= LOW4 (inst.operands[1].reg);
16357 inst.is_neon = 1;
16358 }
16359
/* Encode MVE VCMUL (Qd, Qn, Qm, #rot: float complex multiply with
   rotation).  */

static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The rotation is given in degrees and must be 0, 90, 180 or 270.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For the f32 variant, destination overlapping a source only draws a
     (non-fatal) warning.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Rotation is encoded in two bits: bit 12 set for 180/270, bit 0 set
     for 90/270.  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16391
16392 /* To handle the Low Overhead Loop instructions
16393 in Armv8.1-M Mainline and MVE. */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP takes no operands and needs no relocation.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated start instructions (WLSTP/DLSTP) carry an
     element size in bits <21:20>.  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
       = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      /* LETP requires the explicit LR operand.  */
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through.  */
    case T_MNEM_le:
      /* le <label>.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (FALSE);
      /* fall through.  */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* PC is an error for the tail-predicated forms but only a
	 warning for DLS/WLS; SP always draws a warning.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
	constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16450
16451
/* Handle Neon-syntax VCMP/VCMPE: either dispatch to the MVE encoder or
   encode the VFP register/zero compare forms.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  /* A non-register first operand means this is really an MVE VCMP.  */
  if (!inst.operands[0].isreg)
    {
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against immediate zero: switch the pseudo-opcode to the
	 corresponding vcmpz/vcmpez entry by adding the mnemonic delta.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16518
16519 static void
16520 nsyn_insert_sp (void)
16521 {
16522 inst.operands[1] = inst.operands[0];
16523 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16524 inst.operands[0].reg = REG_SP;
16525 inst.operands[0].isreg = 1;
16526 inst.operands[0].writeback = 1;
16527 inst.operands[0].present = 1;
16528 }
16529
16530 /* Fix up Neon data-processing instructions, ORing in the correct bits for
16531 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
16532
16533 static void
16534 neon_dp_fixup (struct arm_it* insn)
16535 {
16536 unsigned int i = insn->instruction;
16537 insn->is_neon = 1;
16538
16539 if (thumb_mode)
16540 {
16541 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
16542 if (i & (1 << 24))
16543 i |= 1 << 28;
16544
16545 i &= ~(1 << 24);
16546
16547 i |= 0xef000000;
16548 }
16549 else
16550 i |= 0xf2000000;
16551
16552 insn->instruction = i;
16553 }
16554
/* Encode an MVE instruction with the QQR operand shape (Qd, Qn, Rm).
   On entry inst.instruction still holds the generic Neon opcode value,
   which is matched below to pick the corresponding MVE base encoding.
   SIZE is the element size in bits, U the unsigned bit and FP non-zero
   for the floating-point variants.  */

static void
mve_encode_qqr (int size, int U, int fp)
{
  /* SP/PC as the scalar operand only draw (non-fatal) warnings.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x1000d10)
	inst.instruction = 0xee310e60;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* vhadd.  */
      else if (((unsigned)inst.instruction) == 0)
	inst.instruction = 0xee000f40;
      /* vhsub.  */
      else if (((unsigned)inst.instruction) == 0x200)
	inst.instruction = 0xee001f40;
      /* vmla.  */
      else if (((unsigned)inst.instruction) == 0x900)
	inst.instruction = 0xee010e40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x910)
	inst.instruction = 0xee011e60;
      /* vqadd.  */
      else if (((unsigned)inst.instruction) == 0x10)
	inst.instruction = 0xee000f60;
      /* vqsub.  */
      else if (((unsigned)inst.instruction) == 0x210)
	inst.instruction = 0xee001f60;
      /* vqrdmlah.  */
      else if (((unsigned)inst.instruction) == 0x3000b10)
	inst.instruction = 0xee000e40;
      /* vqdmulh.  */
      else if (((unsigned)inst.instruction) == 0x0000b00)
	inst.instruction = 0xee010e60;
      /* vqrdmulh.  */
      else if (((unsigned)inst.instruction) == 0x1000b00)
	inst.instruction = 0xfe010e60;

      /* Set U-bit.  */
      inst.instruction |= U << 28;

      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Common register fields for all QQR encodings.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16627
16628 static void
16629 mve_encode_rqq (unsigned bit28, unsigned size)
16630 {
16631 inst.instruction |= bit28 << 28;
16632 inst.instruction |= neon_logbits (size) << 20;
16633 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16634 inst.instruction |= inst.operands[0].reg << 12;
16635 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16636 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16637 inst.instruction |= LOW4 (inst.operands[2].reg);
16638 inst.is_neon = 1;
16639 }
16640
16641 static void
16642 mve_encode_qqq (int ubit, int size)
16643 {
16644
16645 inst.instruction |= (ubit != 0) << 28;
16646 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16647 inst.instruction |= neon_logbits (size) << 20;
16648 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16649 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16650 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16651 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16652 inst.instruction |= LOW4 (inst.operands[2].reg);
16653
16654 inst.is_neon = 1;
16655 }
16656
16657 static void
16658 mve_encode_rq (unsigned bit28, unsigned size)
16659 {
16660 inst.instruction |= bit28 << 28;
16661 inst.instruction |= neon_logbits (size) << 18;
16662 inst.instruction |= inst.operands[0].reg << 12;
16663 inst.instruction |= LOW4 (inst.operands[1].reg);
16664 inst.is_neon = 1;
16665 }
16666
16667 static void
16668 mve_encode_rrqq (unsigned U, unsigned size)
16669 {
16670 constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16671
16672 inst.instruction |= U << 28;
16673 inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16674 inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16675 inst.instruction |= (size == 32) << 16;
16676 inst.instruction |= inst.operands[0].reg << 12;
16677 inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16678 inst.instruction |= inst.operands[3].reg;
16679 inst.is_neon = 1;
16680 }
16681
16682 /* Helper function for neon_three_same handling the operands. */
16683 static void
16684 neon_three_args (int isquad)
16685 {
16686 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16687 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16688 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16689 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16690 inst.instruction |= LOW4 (inst.operands[2].reg);
16691 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16692 inst.instruction |= (isquad != 0) << 6;
16693 inst.is_neon = 1;
16694 }
16695
16696 /* Encode insns with bit pattern:
16697
16698 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16699 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
16700
16701 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16702 different meaning for some instruction. */
16703
16704 static void
16705 neon_three_same (int isquad, int ubit, int size)
16706 {
16707 neon_three_args (isquad);
16708 inst.instruction |= (ubit != 0) << 24;
16709 if (size != -1)
16710 inst.instruction |= neon_logbits (size) << 20;
16711
16712 neon_dp_fixup (&inst);
16713 }
16714
16715 /* Encode instructions of the form:
16716
16717 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
16718 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
16719
16720 Don't write size if SIZE == -1. */
16721
16722 static void
16723 neon_two_same (int qbit, int ubit, int size)
16724 {
16725 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16726 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16727 inst.instruction |= LOW4 (inst.operands[1].reg);
16728 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16729 inst.instruction |= (qbit != 0) << 6;
16730 inst.instruction |= (ubit != 0) << 24;
16731
16732 if (size != -1)
16733 inst.instruction |= neon_logbits (size) << 18;
16734
16735 neon_dp_fixup (&inst);
16736 }
16737
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/fix up the condition code field.  */
  NEON_CHECK_ARCH = 2,	/* Require the fpu_neon_ext_v1 feature.  */
  NEON_CHECK_ARCH8 = 4	/* Require the fpu_neon_ext_armv8 feature.  */
};
16744
16745 /* Call this function if an instruction which may have belonged to the VFP or
16746 Neon instruction sets, but turned out to be a Neon instruction (due to the
16747 operand types involved, etc.). We have to check and/or fix-up a couple of
16748 things:
16749
16750 - Make sure the user hasn't attempted to make a Neon instruction
16751 conditional.
16752 - Alter the value in the condition code field if necessary.
16753 - Make sure that the arch supports Neon instructions.
16754
16755 Which of these operations take place depends on bits from enum
16756 vfp_or_neon_is_neon_bits.
16757
16758 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
16759 current instruction's condition is COND_ALWAYS, the condition field is
16760 changed to inst.uncond_value. This is necessary because instructions shared
16761 between VFP and Neon may be conditional for the VFP variants only, and the
16762 unconditional Neon version must have, e.g., 0xF in the condition field. */
16763
16764 static int
16765 vfp_or_neon_is_neon (unsigned check)
16766 {
16767 /* Conditions are always legal in Thumb mode (IT blocks). */
16768 if (!thumb_mode && (check & NEON_CHECK_CC))
16769 {
16770 if (inst.cond != COND_ALWAYS)
16771 {
16772 first_error (_(BAD_COND));
16773 return FAIL;
16774 }
16775 if (inst.uncond_value != -1)
16776 inst.instruction |= inst.uncond_value << 28;
16777 }
16778
16779
16780 if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
16781 || ((check & NEON_CHECK_ARCH8)
16782 && !mark_feature_used (&fpu_neon_ext_armv8)))
16783 {
16784 first_error (_(BAD_FPU));
16785 return FAIL;
16786 }
16787
16788 return SUCCESS;
16789 }
16790
16791
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant.  FP should be set to TRUE if this is a SIMD floating-point
   instruction.  CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks.  */
16796
16797 static bfd_boolean
16798 check_simd_pred_availability (int fp, unsigned check)
16799 {
16800 if (inst.cond > COND_ALWAYS)
16801 {
16802 if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16803 {
16804 inst.error = BAD_FPU;
16805 return FALSE;
16806 }
16807 inst.pred_insn_type = INSIDE_VPT_INSN;
16808 }
16809 else if (inst.cond < COND_ALWAYS)
16810 {
16811 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16812 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16813 else if (vfp_or_neon_is_neon (check) == FAIL)
16814 return FALSE;
16815 }
16816 else
16817 {
16818 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
16819 && vfp_or_neon_is_neon (check) == FAIL)
16820 return FALSE;
16821
16822 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16823 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16824 }
16825 return TRUE;
16826 }
16827
16828 /* Neon instruction encoders, in approximate order of appearance. */
16829
16830 static void
16831 do_neon_dyadic_i_su (void)
16832 {
16833 if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
16834 return;
16835
16836 enum neon_shape rs;
16837 struct neon_type_el et;
16838 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16839 rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
16840 else
16841 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16842
16843 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
16844
16845
16846 if (rs != NS_QQR)
16847 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16848 else
16849 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16850 }
16851
16852 static void
16853 do_neon_dyadic_i64_su (void)
16854 {
16855 if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
16856 return;
16857 enum neon_shape rs;
16858 struct neon_type_el et;
16859 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16860 {
16861 rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
16862 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16863 }
16864 else
16865 {
16866 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16867 et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
16868 }
16869 if (rs == NS_QQR)
16870 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16871 else
16872 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16873 }
16874
16875 static void
16876 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
16877 unsigned immbits)
16878 {
16879 unsigned size = et.size >> 3;
16880 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16881 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16882 inst.instruction |= LOW4 (inst.operands[1].reg);
16883 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16884 inst.instruction |= (isquad != 0) << 6;
16885 inst.instruction |= immbits << 16;
16886 inst.instruction |= (size >> 3) << 7;
16887 inst.instruction |= (size & 0x7) << 19;
16888 if (write_ubit)
16889 inst.instruction |= (uval != 0) << 24;
16890
16891 neon_dp_fixup (&inst);
16892 }
16893
/* Encode VSHL: the immediate-shift form, the MVE vector-by-scalar (QQR)
   form, or the three-register Neon form.  */
static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar form: destination and first source must
	     match, and SP/PC as the scalar draw a warning.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same
	     have syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed). Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
16973
/* Encode VQSHL: the immediate-shift form, the MVE vector-by-scalar (QQR)
   form, or the three-register Neon form.  Mirrors do_neon_shl.  */
static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar form: destination and first source must
	     match, and SP/PC as the scalar draw a warning.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17047
/* Encode VRSHL/VQRSHL: the MVE vector-by-scalar (QQR) form or the
   three-register Neon form.  */
static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      /* MVE vector-by-scalar form.  */
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      /* Distinguish the two mnemonics by the Neon opcode parsed so far.  */
      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Swap Dn/Dm to match the shift operand order; see the note in
	 do_neon_shl.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17101
17102 static int
17103 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
17104 {
17105 /* Handle .I8 pseudo-instructions. */
17106 if (size == 8)
17107 {
17108 /* Unfortunately, this will make everything apart from zero out-of-range.
17109 FIXME is this the intended semantics? There doesn't seem much point in
17110 accepting .I8 if so. */
17111 immediate |= immediate << 8;
17112 size = 16;
17113 }
17114
17115 if (size >= 32)
17116 {
17117 if (immediate == (immediate & 0x000000ff))
17118 {
17119 *immbits = immediate;
17120 return 0x1;
17121 }
17122 else if (immediate == (immediate & 0x0000ff00))
17123 {
17124 *immbits = immediate >> 8;
17125 return 0x3;
17126 }
17127 else if (immediate == (immediate & 0x00ff0000))
17128 {
17129 *immbits = immediate >> 16;
17130 return 0x5;
17131 }
17132 else if (immediate == (immediate & 0xff000000))
17133 {
17134 *immbits = immediate >> 24;
17135 return 0x7;
17136 }
17137 if ((immediate & 0xffff) != (immediate >> 16))
17138 goto bad_immediate;
17139 immediate &= 0xffff;
17140 }
17141
17142 if (immediate == (immediate & 0x000000ff))
17143 {
17144 *immbits = immediate;
17145 return 0x9;
17146 }
17147 else if (immediate == (immediate & 0x0000ff00))
17148 {
17149 *immbits = immediate >> 8;
17150 return 0xb;
17151 }
17152
17153 bad_immediate:
17154 first_error (_("immediate value out of range"));
17155 return FAIL;
17156 }
17157
/* Encode bitwise-logic instructions: the three-register form, and the
   immediate forms of VBIC/VORR plus the VAND/VORN pseudo-instructions that
   map onto them with an inverted immediate.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either "Vd, Vd, #imm" or "Vd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      /* MVE restricts the immediate form to 16/32-bit element types.  */
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17266
17267 static void
17268 do_neon_bitfield (void)
17269 {
17270 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17271 neon_check_type (3, rs, N_IGNORE_TYPE);
17272 neon_three_same (neon_quad (rs), 0, -1);
17273 }
17274
17275 static void
17276 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17277 unsigned destbits)
17278 {
17279 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17280 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17281 types | N_KEY);
17282 if (et.type == NT_float)
17283 {
17284 NEON_ENCODE (FLOAT, inst);
17285 if (rs == NS_QQR)
17286 mve_encode_qqr (et.size, 0, 1);
17287 else
17288 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17289 }
17290 else
17291 {
17292 NEON_ENCODE (INTEGER, inst);
17293 if (rs == NS_QQR)
17294 mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17295 else
17296 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17297 }
17298 }
17299
17300
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17308
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17316
/* Encode the MVE VLDR/VSTR [Qn, #imm]{!} (vector base plus immediate)
   addressing form.  SIZE is the access size in bits, ELSIZE the element
   size from the type suffix, LOAD is non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the signed offset into a magnitude and an add/subtract flag.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* The offset must be size-aligned and fit in 7 bits after scaling.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled offset.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17353
/* Encode the MVE VLDR/VSTR [Rn, Qm {, UXTW #os}] (base plus vector offset)
   addressing form.  SIZE is the access size in bits, ELSIZE the element
   size from the type suffix, LOAD is non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* The optional UXTW shift amount is packed into the upper bits of
     operand 1's imm; the low 5 bits hold the offset register.  */
  unsigned os = inst.operands[1].imm >> 5;
  unsigned type = inst.vectype.el[0].type;
  constraint (os != 0 && size == 8,
	      _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
	      _("shift immediate must be 1, 2 or 3 for half-word, word"
		" or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Element size must be compatible with the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
	      BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		  _("destination register and offset register may not be"
		    " the same"));
      /* Same-size loads must be unsigned; widening loads may be signed or
	 unsigned.  The U bit is set for unsigned or same-size accesses.  */
      constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
      constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		  BAD_EL_TYPE);
      inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
    }
  else
    {
      constraint (type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  inst.instruction |= !!os;
}
17408
/* Encode the MVE VLDR/VSTR [Rn, #imm]{!} / [Rn], #imm (base plus
   immediate) addressing form.  SIZE is the access size in bits, ELSIZE the
   element size from the type suffix, LOAD is non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Element size must be compatible with the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* Widening loads must specify a signedness.  */
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* Narrowing stores must be untyped.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the signed offset into a magnitude and an add/subtract flag.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* Offset must be size-aligned and fit in 7 bits after scaling.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted base and vector registers.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Same-size form: PC base and SP-with-writeback draw warnings.  */
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low seven bits before inserting the scaled offset.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17490
/* Top-level encoder for the MVE VLDR[BHWD]/VSTR[BHWD] family: derive the
   access size and load/store direction from the mnemonic, then dispatch on
   the parsed addressing mode.  */
static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Each vldr case falls through to its vstr counterpart to pick up the
     shared access size.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17557
/* Encode MVE VST/VLD instructions using the [Rn]{!} addressing form.  */
static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Only a plain [Rn]{!} address is accepted: no offset register, no
     immediate, no unresolved symbol.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17595
/* Encode MVE VADDLV{A}: shape is R, R, Q and the element type must be
   S32 or U32.  */

static void
do_mve_vaddlv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  /* Predicated inside a VPT block, otherwise a plain MVE instruction.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  inst.instruction |= (et.type == NT_unsigned) << 28;	/* U bit.  */
  inst.instruction |= inst.operands[1].reg << 19;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
17619
/* Encode dyadic Neon/MVE operations with signed/unsigned integer or float
   element types (shapes DDD, QQQ or QQR).  */

static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  /* Floating-point VMAX/VMIN additionally require Neon v1 FP support.  */
  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17638
/* Encode integer/float VADD/VSUB-style instructions, trying the VFP
   single/double syntax first, then Neon or MVE forms.  */

static void
do_neon_addsub_if_i (void)
{
  /* Prefer the VFP encoding when the VFP form of the mnemonic applies.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The scalar (QQR) form has no 64-bit variant.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17673
17674 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17675 result to be:
17676 V<op> A,B (A is operand 0, B is operand 2)
17677 to mean:
17678 V<op> A,B,A
17679 not:
17680 V<op> A,B,B
17681 so handle that case specially. */
17682
17683 static void
17684 neon_exchange_operands (void)
17685 {
17686 if (inst.operands[1].present)
17687 {
17688 void *scratch = xmalloc (sizeof (inst.operands[0]));
17689
17690 /* Swap operands[1] and operands[2]. */
17691 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17692 inst.operands[1] = inst.operands[2];
17693 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17694 free (scratch);
17695 }
17696 else
17697 {
17698 inst.operands[1] = inst.operands[2];
17699 inst.operands[2] = inst.operands[0];
17700 }
17701 }
17702
/* Encode Neon compare instructions.  REGTYPES are the element types allowed
   for the register-register form, IMMTYPES those allowed for the
   compare-against-immediate form.  If INVERT, swap the source operands
   first (used for the "inverted" comparisons such as VCLT -> VCGT).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against an immediate (the immediate operand form).  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17730
/* Non-inverted compare (e.g. VCGE/VCGT): S/U/F reg form, S or F imm form.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
17736
/* Inverted compare (e.g. VCLE/VCLT): operands are swapped by neon_compare.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
17742
/* VCEQ: equality compare, integer or float element types for both forms.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
17748
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalar: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && reg <= 7 && idx <= 3)
    return reg | (idx << 3);

  /* 32-bit scalar: register in Rm[3:0], index in M.  */
  if (elsize == 32 && reg <= 15 && idx <= 1)
    return reg | (idx << 4);

  /* Unsupported element size, or register/index out of range.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17787
/* Encode multiply / multiply-accumulate scalar instructions.  ET is the
   checked element type; UBIT is non-zero for the Q-register (quad) form.
   Operand 2 holds GAS's internal indexed-scalar encoding, translated by
   neon_scalar_for_mul.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17812
/* Encode VMLA/VMLS-style multiply-accumulate: VFP form first, then the
   scalar-indexed Neon form, the MVE vector-by-GPR form, or the plain
   three-register Neon form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only, not available on MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* GPR operand form (Q, Q, R): MVE only.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      /* Three-vector form: Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17848
/* Encode the BF16 VFMAB/VFMAT instructions (vector or indexed-scalar
   forms).  Any mnemonic other than VFMAB is treated as VFMAT (T bit set).  */

static void
do_bfloat_vfma (void)
{
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
  enum neon_shape rs;
  int t_bit = 0;

  if (inst.instruction != B_MNEM_vfmab)
    {
      t_bit = 1;
      inst.instruction = B_MNEM_vfmat;
    }

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: index 0..3, register Dm < 8.  The index is
	 packed into the low nibble of operands[2].reg by the parser.  */
      rs = neon_select_shape (NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      int index = inst.operands[2].reg & 0xf;
      constraint (!(index < 4), _("index must be in the range 0 to 3"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 8),
		  _("indexed register must be less than 8"));
      neon_three_args (t_bit);
      /* Split the 2-bit index across bits 3 and 5 of the encoding.  */
      inst.instruction |= ((index & 1) << 3);
      inst.instruction |= ((index & 2) << 4);
    }
  else
    {
      /* Plain three-vector form.  */
      rs = neon_select_shape (NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (t_bit);
    }

}
17886
/* Encode VFMA/VFMS: VFP form first, then the MVE floating-point forms
   (including the vector-by-scalar QQR encoding) or the Neon form.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* Vector-by-GPR form; SP/PC as the scalar only draw warnings.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Fixed opcode for the MVE VFMA (vector by scalar) encoding;
	     bit 28 selects the 16-bit element size.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17929
17930 static void
17931 do_mve_vfma (void)
17932 {
17933 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
17934 inst.cond == COND_ALWAYS)
17935 {
17936 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
17937 inst.instruction = N_MNEM_vfma;
17938 inst.pred_insn_type = INSIDE_VPT_INSN;
17939 inst.cond = 0xf;
17940 return do_neon_fmac();
17941 }
17942 else
17943 {
17944 do_bfloat_vfma();
17945 }
17946 }
17947
17948 static void
17949 do_neon_tst (void)
17950 {
17951 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17952 struct neon_type_el et = neon_check_type (3, rs,
17953 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
17954 neon_three_same (neon_quad (rs), 0, et.size);
17955 }
17956
/* VMUL with 3 registers allows the P8 type.  The scalar version supports the
   same types as the MAC equivalents.  The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only; shares the MAC encoding path.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE VMUL: integer or (with MVE-FP) float element types.  */
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* Neon three-register VMUL, including the P8 polynomial type.  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
17996
/* Encode saturating doubling multiply-high (VQDMULH family): indexed-scalar
   Neon form, MVE form (adds S8 and the QQR vector-by-GPR shape), or the
   plain Neon three-register form.  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE additionally accepts S8 and the QQR shape.  */
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18037
/* Encode MVE VADDV{A}: reduce a vector into a GPR; S/U 8/16/32 elements.  */

static void
do_mve_vaddv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  /* Predicated inside a VPT block, otherwise a plain MVE instruction.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18057
/* Encode MVE VHCADD (halving complex add): signed 8/16/32-bit elements with
   a rotation of 90 or 270 degrees.  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Only 90 and 270 degree rotations are encodable.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  inst.instruction |= (rot == 270) << 12;	/* Rot bit.  */
  inst.is_neon = 1;
}
18081
/* Encode MVE VQDMULL[BT]: saturating doubling multiply long, vector-vector
   (QQQ) or vector-scalar (QQR) forms, S16/S32 elements.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* With 32-bit elements, destination overlapping a source is
     UNPREDICTABLE — warn but still assemble.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18110
/* Encode MVE VADC{I}: add with carry across vectors, I32 elements only.  */

static void
do_mve_vadc (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, 64);
}
18128
/* Encode MVE VBRSR: vector bit-reverse-and-shift-right by a GPR (QQR shape),
   8/16/32-bit elements.  */

static void
do_mve_vbrsr (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, 0, 0);
}
18143
/* Encode MVE VSBC{I}: subtract with carry across vectors, I32 elements.  */

static void
do_mve_vsbc (void)
{
  neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (1, 64);
}
18156
/* Encode MVE VMULH/VRMULH: multiply returning high half, signed or
   unsigned 8/16/32-bit elements.  */

static void
do_mve_vmulh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (et.type == NT_unsigned, et.size);
}
18171
/* Encode MVE VQDMLAH-type instructions: vector by GPR scalar (QQR shape),
   signed 8/16/32-bit elements.  */

static void
do_mve_vqdmlah (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
18186
/* Encode MVE VQDMLADH-type instructions: three-vector (QQQ) form with
   signed 8/16/32-bit elements.  */

static void
do_mve_vqdmladh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, et.size);
}
18201
18202
/* Encode VMULL: disambiguate between Neon VMUL (when MVE is absent, the
   mnemonic was VMULLT and the shape/types indicate the Neon instruction)
   and MVE VMULL[BT].  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  /* Without MVE, an unconditional "vmullt" may really be Neon "vmul" with
     a 't' condition suffix — check the types and fall through to VMUL.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
      && inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      if (rs == NS_QQQ)
	{

	  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
						    N_SUF_32 | N_F64 | N_P8
						    | N_P16 | N_I_MVE | N_KEY);
	  if (((et.type == NT_poly) && et.size == 8
	       && ARM_CPU_IS_ANY (cpu_variant))
	      || (et.type == NT_integer) || (et.type == NT_float))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

  neon_vmul:
  /* Re-dispatch as Neon VMUL with a 't' condition code.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18257
/* Encode MVE VABAV: absolute-difference accumulate into a GPR (RQQ shape),
   signed or unsigned 8/16/32-bit elements.  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* NS_NULL here: only the element type is checked, not the shape.  */
  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18280
/* Encode the MVE VMLADAV/VMLSDAV family (multiply-accumulate across
   vector into a GPR), RQQ shape.  The exchanging (x) and subtracting
   (vmlsdav*) variants only accept signed element types.  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* Unsigned types are invalid for the exchanging and subtracting
     variants.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit element size flag sits in a different bit position for the
     subtracting (vmlsdav*) variants.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18316
/* Encode the MVE VMLALDAV/VMLSLDAV family (long multiply-accumulate across
   vector into a GPR pair), RRQQ shape, 16/32-bit elements.  The subtracting
   (vmlsldav*) variants only accept signed element types.  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18339
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family, RRQQ shape with S32 (or,
   for plain vrmlaldavh/a, also U32) elements.  The second GPR operand must
   be an odd register and never PC; SP is a warning for the subtracting
   variants but a hard error for the others (see below).  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
      || inst.instruction == M_MNEM_vrmlsldavha
      || inst.instruction == M_MNEM_vrmlsldavhx
      || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18382
18383
/* Encode MVE VMAXNMV-type instructions: floating-point reduce into a GPR
   (RQ shape).  SP/PC as the GPR only draw warnings.  */

static void
do_mve_vmaxnmv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.size == 16, 64);
}
18403
/* Encode MVE VMAXV/VMINV (S/U elements) and VMAXAV/VMINAV (signed only):
   reduce a vector into a GPR (RQ shape).  */

static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  /* The absolute (vmaxav/vminav) variants accept signed types only.  */
  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18427
18428
/* Encode VQRDMLAH/VQRDMLSH: the ARMv8.1 AdvSIMD forms (indexed-scalar or
   three-register), or the MVE vector-by-GPR (QQR) form.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
      if (inst.operands[2].isscalar)
	{
	  /* Indexed-scalar form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (SCALAR, inst);
	  neon_mul_mac (et, neon_quad (rs));
	}
      else
	{
	  /* Three-register form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (INTEGER, inst);
	  /* The U bit (rounding) comes from bit mask.  */
	  neon_three_same (neon_quad (rs), 0, et.size);
	}
    }
  else
    {
      /* MVE form: vector by GPR scalar.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18473
/* Encode absolute floating-point compares (e.g. VACGE/VACGT): 16/32-bit
   float elements, DDD or QQQ shapes.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
18483
/* Inverted absolute compares (e.g. VACLE/VACLT): swap operands, then
   encode as the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18490
/* Encode VRECPS/VRSQRTS-style step instructions: 16/32-bit float
   elements, DDD or QQQ shapes.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
18499
/* Encode VABS/VNEG: VFP form first, otherwise the two-register Neon/MVE
   form with signed-integer or float elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;	/* F bit.  */
  inst.instruction |= neon_logbits (et.size) << 18;	/* Size field.  */

  neon_dp_fixup (&inst);
}
18526
/* Encode VSLI (shift left and insert).  MVE only has the Q-register form
   without 64-bit elements; Neon also allows D registers and 64-bit.  */

static void
do_neon_sli (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }


  /* Valid shift amounts for SLI are 0 .. size-1.  */
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18552
/* Encode VSRI (shift right and insert).  MVE only has the Q-register form
   without 64-bit elements; Neon also allows D registers and 64-bit.  */

static void
do_neon_sri (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }

  /* Valid shift amounts for SRI are 1 .. size; the encoding stores
     size - imm.  */
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
18577
/* Encode VQSHLU (saturating shift left, unsigned result, signed input) by
   immediate.  MVE only has the Q-register form without 64-bit elements.  */

static void
do_neon_qshlu_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_UNS,
			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
18608
/* Encode VQMOVN: saturating narrowing move (Q source, D destination).  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Bits 6-7 select the signedness variant.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Size field is the *destination* (halved) element size.  */
  neon_two_same (0, 1, et.size / 2);
}
18623
/* Encode VQMOVUN: saturating narrowing move with unsigned saturation.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18633
/* Encode VQSHRN/VQRSHRN: saturating (rounding) shift right and narrow.
   A zero shift amount is emitted as the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Encoded immediate is SIZE - shift, as for other right shifts.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
18660
/* Encode VQSHRUN/VQRSHRUN: saturating shift right and narrow with unsigned
   saturation of signed operands.  A zero shift is emitted as VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
18690
/* Encode VMOVN: narrowing move (Q source, D destination), no saturation.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  /* Size field is the halved (destination) element size.  */
  neon_two_same (0, 1, et.size / 2);
}
18699
/* Encode VSHRN/VRSHRN: (rounding) shift right and narrow.  A zero shift
   amount is emitted as the equivalent VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Encoded immediate is SIZE - shift.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
18724
/* Encode VSHLL: shift left long (D source, Q destination).  The maximum
   shift (shift == element size) has its own, different encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
18754
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of every VCVT conversion "flavour".  Each CVT_VAR row is
   (name, destination type bits, source type bits, register-class bits,
   bitshift-form VFP mnemonic, plain VFP mnemonic, round-to-zero VFP
   mnemonic); the mnemonic columns are NULL where no legacy VFP encoding
   exists.  The 'whole_reg' and 'key' tokens are resolved against local
   variables at the expansion site (see get_neon_cvt_flavour).  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  The enumerator order must
   match the row order of CVT_FLAVOUR_VAR, since the enum value is used to
   index tables generated from the same X-macro.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by the VFP (rather than Neon) encoders.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18807
/* Determine which conversion flavour the current instruction is, by trying
   a type-check against every row of CVT_FLAVOUR_VAR in turn for shape RS.
   Returns neon_cvt_flavour_invalid if none matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Expanded once per table row: accept the first row whose type check
     succeeds, clearing any error left by earlier failed attempts.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18833
/* Variant of the VCVT/VRINT family being assembled, named after the
   instruction suffix letter (e.g. neon_cvt_mode_a for VCVTA).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
18844
18845 /* Neon-syntax VFP conversions. */
18846
/* Emit a VFP conversion for FLAVOUR via its legacy VFP mnemonic, chosen
   from the CVT_FLAVOUR_VAR table: the bitshift-form column for immediate
   (fixed-point) shapes, the plain column otherwise.  Does nothing when the
   table has no mnemonic for FLAVOUR.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the duplicated register so the remaining operands match
	     the two-operand VFP form.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18898
/* Emit the round-to-zero VFP form of the current VCVT, using the
   round-to-zero mnemonic column of CVT_FLAVOUR_VAR.  Does nothing when no
   such mnemonic exists for the detected flavour.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18915
/* Encode an FP v8 VCVT{A,N,P,M} (directed rounding) conversion.  FLAVOUR
   determines the sz (double vs single/half source) and op (signed vs
   unsigned result) fields; MODE selects the rounding-mode field.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* sz = 1 for double-precision source, op = 1 for signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* These instructions are unconditional.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
18991
/* Worker for the whole VCVT* family.  Determines the operand shape and
   conversion flavour, then dispatches to the appropriate VFP, Neon or MVE
   encoding.  MODE is the rounding/suffix variant (see enum neon_cvt_mode).  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      /* MVE VCVT (between fp and fixed-point/integer) on Q registers.  */
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      else if (mode == neon_cvt_mode_n)
	{
	  /* We are dealing with vcvt with the 'ne' condition.  */
	  inst.cond = 0x1;
	  inst.instruction = N_MNEM_vcvt;
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	unsigned immbits;
	/* Indexed by flavour: encoding bits for the fixed-point forms.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    /* MVE bounds-checks the fixed-point fraction size here; #0 is
	       rejected, and the maximum equals the element width.  */
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
	      case neon_cvt_flavour_f16_s16:
	      case neon_cvt_flavour_f16_u16:
	      case neon_cvt_flavour_s16_f16:
	      case neon_cvt_flavour_u16_f16:
		constraint (inst.operands[2].imm > 16,
			    _("immediate value out of range"));
		break;
	      case neon_cvt_flavour_f32_u32:
	      case neon_cvt_flavour_f32_s32:
	      case neon_cvt_flavour_s32_f32:
	      case neon_cvt_flavour_u32_f32:
		constraint (inst.operands[2].imm > 32,
			    _("immediate value out of range"));
		break;
	      default:
		inst.error = BAD_FPU;
		return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE: bit 21 was already set above; this OR is redundant but
	       harmless.  The encoded immediate is SIZE - fraction bits.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  /* Directed-rounding forms need ARMv8 SIMD (or MVE).  */
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{

	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Indexed by flavour: encoding bits for integer conversions.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	      {
		if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		  return;
	      }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19277
/* Assemble VCVTR: dispatch to the common worker with mode 'x'.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19283
/* Assemble plain VCVT: dispatch to the common worker with mode 'z'.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19289
/* Assemble VCVTA: dispatch to the common worker with mode 'a'.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19295
/* Assemble VCVTN: dispatch to the common worker with mode 'n'.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19301
/* Assemble VCVTP: dispatch to the common worker with mode 'p'.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19307
/* Assemble VCVTM: dispatch to the common worker with mode 'm'.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19313
/* Encode the VFP form of VCVTB/VCVTT.  T selects VCVTT (top half of the
   half-precision register) when TRUE; TO is TRUE when the result is the
   half-precision operand; IS_DOUBLE selects the double-precision variant.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The full-width register is a D register only in the double-precision
     variant; the half-precision operand is always an S register.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
19329
/* Worker for VCVTB/VCVTT.  T is TRUE for VCVTT, FALSE for VCVTB.  Handles
   the MVE Q-register forms and the various VFP half/single/double and
   bfloat16 combinations.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      int single_to_half = 0;
      if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      /* On MVE, integer<->float flavours are really plain VCVT; re-dispatch
	 through the common VCVT worker.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour ==  neon_cvt_flavour_u16_f16
	      || flavour ==  neon_cvt_flavour_s16_f16
	      || flavour ==  neon_cvt_flavour_f16_s16
	      || flavour ==  neon_cvt_flavour_f16_u16
	      || flavour ==  neon_cvt_flavour_u32_f32
	      || flavour ==  neon_cvt_flavour_s32_f32
	      || flavour ==  neon_cvt_flavour_f32_s32
	      || flavour ==  neon_cvt_flavour_f32_u32))
	{
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* MVE VCVTB/VCVTT between f16 and f32 vectors.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      /* bfloat16 variant requires the BF16 extension.  */
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else
    return;
}
19420
/* Assemble VCVTB (bottom half).  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
19426
19427
/* Assemble VCVTT (top half).  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
19433
/* Encode VMOV/VMVN with an immediate operand, finding a cmode/immediate
   encoding for the value (flipping between VMOV and VMVN when the value is
   only representable in the inverted form).  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its high word in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the MOV/MVN selector bit with the final choice of OP.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19485
/* Encode VMVN, either register-to-register or with an immediate (via
   neon_move_immediate).  Shared between Neon and MVE.  */

static void
do_neon_mvn (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	/* MVE form: Q registers only.  */
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE's immediate form only accepts a Q-register destination.  */
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
    }
}
19520
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |

   SIZE is the element size (its log2 goes in bits 20-21); the U bit is set
   for unsigned element types.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
19540
/* Encode VADDL/VSUBL/VABDL.  The Neon form is Q = D op D; for MVE the
   mnemonics are only valid as vadd/vsub/vabd with an le/lt condition
   suffix, and are re-dispatched accordingly.  */

static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_QQQ, NS_QQR, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19588
/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19596
/* Helper for long multiply(-accumulate) instructions that accept either a
   scalar or a vector as the final operand.  REGTYPES are the types allowed
   for the scalar form, SCALARTYPES those for the vector form.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
19615
static void
do_neon_mac_maybe_scalar_long (void)
{
  /* VMLAL/VMLSL/VMULL by-element or register form.  The first argument
     constrains the scalar-index form to 16/32-bit types; the second allows
     any 8/16/32-bit signed/unsigned type for the register form.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19621
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* Valid ranges: Q form has reg 0-7 / index 0-3; D form has reg 0-15 /
     index 0-1.  */
  if (quad_p ? (reg <= 7 && idx <= 3) : (reg <= 15 && idx <= 1))
    {
      if (quad_p)
	return ((reg & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));

      return (((reg & 0x1) << 5)
	      | ((reg >> 1) & 0x7)
	      | ((idx & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
19654
/* Encode VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1): FP16 multiply with
   FP32 accumulate, either three-same register form or with a scalar-indexed
   third operand.  Requires the Armv8.2-A FP16 FML extension.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size'
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Splice in the scalar Rm encoding computed above.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19735
/* VFMAL: FP16 fused multiply-accumulate long, add variant.  */

static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
19741
/* VFMSL: FP16 fused multiply-accumulate long, subtract variant.  */

static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
19747
19748 static void
19749 do_neon_dyadic_wide (void)
19750 {
19751 struct neon_type_el et = neon_check_type (3, NS_QQD,
19752 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
19753 neon_mixed_length (et, et.size);
19754 }
19755
19756 static void
19757 do_neon_dyadic_narrow (void)
19758 {
19759 struct neon_type_el et = neon_check_type (3, NS_QDD,
19760 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
19761 /* Operand sign is unimportant, and the U bit is part of the opcode,
19762 so force the operand type to integer. */
19763 et.type = NT_integer;
19764 neon_mixed_length (et, et.size / 2);
19765 }
19766
static void
do_neon_mul_sat_scalar_long (void)
{
  /* Saturating doubling long multiply family: signed 16/32-bit element
     types only, for both the scalar-index and register forms.  */
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19772
static void
do_neon_vmull (void)
{
  /* VMULL: widening multiply.  A scalar third operand selects the
     by-element form; otherwise integer or polynomial register form.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Feed 32 to neon_mixed_length so the size field becomes 0b10.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19804
static void
do_neon_ext (void)
{
  /* VEXT: extract a byte run spanning two source vectors.  The immediate
     operand is given in elements and converted to a byte index here.  */
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte index must fit in one register: 8 bytes for D, 16 for Q.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19826
static void
do_neon_rev (void)
{
  /* VREV16/VREV32/VREV64: reverse elements within regions of a vector.
     MVE restricts this to Q registers.  */
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19858
static void
do_neon_dup (void)
{
  /* VDUP: replicate either a vector scalar or an ARM core register into
     every lane of the destination vector.  */
  if (inst.operands[1].isscalar)
    {
      /* Scalar source form is Neon-only (no MVE equivalent).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The lane index is encoded above the size bits in imm4.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      /* SP/PC sources are merely unpredictable under MVE, not errors.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19928
/* Encode the MVE VMOV forms moving two GPRs to/from two 32-bit lanes of a
   Q register pair-wise (cases 16/17 of do_neon_mov).  TOQ is non-zero when
   the transfer is GPR -> vector, selecting the operand ordering.  */

static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand slots: Rt/Rt2 are GPRs, Q0/Q1 the two vector-lane operands.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  /* NOTE(review): .reg here appears to be the packed scalar encoding
     (register * 32 + lane) given the /32 and %4 arithmetic below.  */
  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("General purpose registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  inst.instruction = 0xec000f00;
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
19966
/* Encode MVE VMOVN-family narrowing moves: each 16/32-bit source element is
   written to a half-width lane of the destination (which half is selected by
   the opcode template — presumably the T/B variant; confirm against the
   opcode table).  */

static void
do_mve_movn (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
					    | N_KEY);

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Size field encodes the *destination* element width (source / 2).  */
  inst.instruction |= (neon_logbits (et.size) - 1) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;

}
19989
/* VMOV has particularly many variations.  It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
     10. VMOV.F32 <Sd>, #imm
     11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
     12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
     13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)
     16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
     17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
     18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
     19. VMOV<c>.<dt> <Qd[idx]>, <Rt>

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
					  NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;

      /* In MVE we interpret the following instructions as same, so ignoring
	 the following type (float) and size (64) checks.
	 a: VMOV<c><q> <Dd>, <Dm>
	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
      if ((et.type == NT_float && et.size == 64)
	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (!check_simd_pred_availability (FALSE,
					   NEON_CHECK_CC | NEON_CHECK_ARCH))
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR uses the same register for both sources.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (!check_simd_pred_availability (FALSE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH))
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[1].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[1].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }
	/* Scalar container: 64 bits for a D-register scalar, 128 bits
	   otherwise (presumably an MVE Q-register scalar — confirm).  */
	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));


	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* Fold the lane index into the opc1/opc2 bit pattern.  */
	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_CC
					| NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[0].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[0].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }

	/* Scalar container: 64 bits for a D-register scalar, 128 bits
	   otherwise (presumably an MVE Q-register scalar — confirm).  */
	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));

	/* Sub-word reads also encode signedness (U bit pattern).  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_RRSS:
      do_mve_mov (0);
      break;
    case NS_SSRR:
      do_mve_mov (1);
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
20367
/* Encode MVE VMOVL-family widening moves (Q <- widened Q lanes).  If the
   operands do not match the Q,Q form, the mnemonic must actually have been
   VMOV with a trailing condition (e.g. "vmovlt" = vmov + LT); re-dispatch
   to do_neon_mov inside an IT block with cond LT (0xb).  */

static void
do_mve_movl (void)
{
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20400
/* Encode a rounding right shift by immediate (e.g. VRSHR).  A shift count
   of zero has no encoding of its own and is emitted as VMOV instead.  */

static void
do_neon_rshift_round_imm (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE: Q registers only, 8/16/32-bit signed/unsigned elements.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* The hardware encodes the shift amount as (et.size * 2) - count;
     neon_imm_shift takes it as et.size - imm with the inverse flag set.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20435
20436 static void
20437 do_neon_movhf (void)
20438 {
20439 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
20440 constraint (rs != NS_HH, _("invalid suffix"));
20441
20442 if (inst.cond != COND_ALWAYS)
20443 {
20444 if (thumb_mode)
20445 {
20446 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
20447 " the behaviour is UNPREDICTABLE"));
20448 }
20449 else
20450 {
20451 inst.error = BAD_COND;
20452 return;
20453 }
20454 }
20455
20456 do_vfp_sp_monadic ();
20457
20458 inst.is_neon = 1;
20459 inst.instruction |= 0xf0000000;
20460 }
20461
20462 static void
20463 do_neon_movl (void)
20464 {
20465 struct neon_type_el et = neon_check_type (2, NS_QD,
20466 N_EQK | N_DBL, N_SU_32 | N_KEY);
20467 unsigned sizebits = et.size >> 3;
20468 inst.instruction |= sizebits << 19;
20469 neon_two_same (0, et.type == NT_unsigned, -1);
20470 }
20471
20472 static void
20473 do_neon_trn (void)
20474 {
20475 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20476 struct neon_type_el et = neon_check_type (2, rs,
20477 N_EQK, N_8 | N_16 | N_32 | N_KEY);
20478 NEON_ENCODE (INTEGER, inst);
20479 neon_two_same (neon_quad (rs), 1, et.size);
20480 }
20481
20482 static void
20483 do_neon_zip_uzp (void)
20484 {
20485 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20486 struct neon_type_el et = neon_check_type (2, rs,
20487 N_EQK, N_8 | N_16 | N_32 | N_KEY);
20488 if (rs == NS_DD && et.size == 32)
20489 {
20490 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
20491 inst.instruction = N_MNEM_vtrn;
20492 do_neon_trn ();
20493 return;
20494 }
20495 neon_two_same (neon_quad (rs), 1, et.size);
20496 }
20497
20498 static void
20499 do_neon_sat_abs_neg (void)
20500 {
20501 if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
20502 return;
20503
20504 enum neon_shape rs;
20505 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20506 rs = neon_select_shape (NS_QQ, NS_NULL);
20507 else
20508 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20509 struct neon_type_el et = neon_check_type (2, rs,
20510 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20511 neon_two_same (neon_quad (rs), 1, et.size);
20512 }
20513
20514 static void
20515 do_neon_pair_long (void)
20516 {
20517 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20518 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
20519 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
20520 inst.instruction |= (et.type == NT_unsigned) << 7;
20521 neon_two_same (neon_quad (rs), 1, et.size);
20522 }
20523
20524 static void
20525 do_neon_recip_est (void)
20526 {
20527 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20528 struct neon_type_el et = neon_check_type (2, rs,
20529 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
20530 inst.instruction |= (et.type == NT_float) << 8;
20531 neon_two_same (neon_quad (rs), 1, et.size);
20532 }
20533
20534 static void
20535 do_neon_cls (void)
20536 {
20537 if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20538 return;
20539
20540 enum neon_shape rs;
20541 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20542 rs = neon_select_shape (NS_QQ, NS_NULL);
20543 else
20544 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20545
20546 struct neon_type_el et = neon_check_type (2, rs,
20547 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20548 neon_two_same (neon_quad (rs), 1, et.size);
20549 }
20550
20551 static void
20552 do_neon_clz (void)
20553 {
20554 if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20555 return;
20556
20557 enum neon_shape rs;
20558 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20559 rs = neon_select_shape (NS_QQ, NS_NULL);
20560 else
20561 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20562
20563 struct neon_type_el et = neon_check_type (2, rs,
20564 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
20565 neon_two_same (neon_quad (rs), 1, et.size);
20566 }
20567
20568 static void
20569 do_neon_cnt (void)
20570 {
20571 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20572 struct neon_type_el et = neon_check_type (2, rs,
20573 N_EQK | N_INT, N_8 | N_KEY);
20574 neon_two_same (neon_quad (rs), 1, et.size);
20575 }
20576
20577 static void
20578 do_neon_swp (void)
20579 {
20580 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20581 neon_two_same (neon_quad (rs), 1, -1);
20582 }
20583
static void
do_neon_tbl_tbx (void)
{
  /* VTBL/VTBX: byte-wise table lookup through a list of 1-4 D registers.
     The list length (minus one) is encoded in bits [9:8].  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
20607
static void
do_neon_ldm_stm (void)
{
  /* Encode VLDM/VSTM for a D-register list.  Permitted with any VFP
     (v1xd) implementation or with MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer count.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      /* S-register lists go through the VFP-specific path.  */
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Rn (base).  */
  inst.instruction |= inst.operands[0].writeback << 21;	/* W bit.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;	/* Vd.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;	/* D bit.  */

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20640
20641 static void
20642 do_vfp_nsyn_pop (void)
20643 {
20644 nsyn_insert_sp ();
20645 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20646 return do_vfp_nsyn_opcode ("vldm");
20647 }
20648
20649 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20650 _(BAD_FPU));
20651
20652 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20653 _("register list must contain at least 1 and at most 16 "
20654 "registers"));
20655
20656 if (inst.operands[1].issingle)
20657 do_vfp_nsyn_opcode ("fldmias");
20658 else
20659 do_vfp_nsyn_opcode ("fldmiad");
20660 }
20661
20662 static void
20663 do_vfp_nsyn_push (void)
20664 {
20665 nsyn_insert_sp ();
20666 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20667 return do_vfp_nsyn_opcode ("vstmdb");
20668 }
20669
20670 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20671 _(BAD_FPU));
20672
20673 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20674 _("register list must contain at least 1 and at most 16 "
20675 "registers"));
20676
20677 if (inst.operands[1].issingle)
20678 do_vfp_nsyn_opcode ("fstmdbs");
20679 else
20680 do_vfp_nsyn_opcode ("fstmdbd");
20681 }
20682
20683
static void
do_neon_ldr_str (void)
{
  /* Encode VLDR/VSTR by deferring to the FLDS/FSTS (single precision) or
     FLDD/FSTD (double precision) pseudo-opcode handlers.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
20720
static void
do_t_vldr_vstr_sysreg (void)
{
  /* Encode the Armv8.1-M T32 VLDR/VSTR (system register) forms.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset field is 7 bits wide, so reject anything larger.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* The system register number is split between bits 13-15 and bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20747
static void
do_vldr_vstr (void)
{
  /* Dispatch VLDR/VSTR: system-register form when the first operand is
     not a register, otherwise the ordinary FP/MVE load/store.  */
  bfd_boolean sysreg_op = !inst.operands[0].isreg;

  /* VLDR/VSTR (System Register).  */
  if (sysreg_op)
    {
      if (!mark_feature_used (&arm_ext_v8_1m_main))
	as_bad (_("Instruction not permitted on this architecture"));

      do_t_vldr_vstr_sysreg ();
    }
  /* VLDR/VSTR.  */
  else
    {
      /* Accepted with any VFP (v1xd) or with MVE.  */
      if (!mark_feature_used (&fpu_vfp_ext_v1xd)
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_bad (_("Instruction not permitted on this architecture"));
      do_neon_ldr_str ();
    }
}
20770
20771 /* "interleave" version also handles non-interleaving register VLD1/VST1
20772 instructions. */
20773
static void
do_neon_ld_st_interleave (void)
{
  /* Encode VLD<n>/VST<n> (multiple n-element structures), including the
     plain register-list forms of VLD1/VST1.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate an explicit @<align> qualifier into the two alignment bits.
     The legal alignments depend on the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  /* Replace the provisional <n> in bits [9:8] with the looked-up type.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20839
20840 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20841 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20842 otherwise. The variable arguments are a list of pairs of legal (size, align)
20843 values, terminated with -1. */
20844
static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  /* Check (SIZE, ALIGN) against the legal pairs supplied as trailing
     varargs, terminated by -1.  Returns SUCCESS or FAIL; on SUCCESS with
     an alignment qualifier present, sets *DO_ALIGNMENT to 1.  */
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No @<align> qualifier: nothing to check or encode.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Scan the (size, align) pairs until a match or the -1 terminator.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
20880
static void
do_neon_ld_st_lane (void)
{
  /* Encode VLD<n>/VST<n> (single n-element structure to/from one lane).  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;	/* <n> minus one.  */
  int max_el = 64 / et.size;		/* Lanes per D register.  */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* The legal alignments, and how they are encoded, vary with <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
20965
20966 /* Encode single n-element structure to all lanes VLD<n> instructions. */
20967
static void
do_neon_ld_dup (void)
{
  /* Encode the VLD<n> "to all lanes" (dup) forms; <n> comes from bits
     [9:8] of the provisional instruction word.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* The T bit distinguishes a one- or two-register list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements at 128-bit alignment use the size encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The "a" (alignment present) bit.  */
  inst.instruction |= do_alignment << 4;
}
21040
21041 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
21042 apart from bits [11:4]. */
21043
21044 static void
21045 do_neon_ldx_stx (void)
21046 {
21047 if (inst.operands[1].isreg)
21048 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21049
21050 switch (NEON_LANE (inst.operands[0].imm))
21051 {
21052 case NEON_INTERLEAVE_LANES:
21053 NEON_ENCODE (INTERLV, inst);
21054 do_neon_ld_st_interleave ();
21055 break;
21056
21057 case NEON_ALL_LANES:
21058 NEON_ENCODE (DUP, inst);
21059 if (inst.instruction == N_INV)
21060 {
21061 first_error ("only loads support such operands");
21062 break;
21063 }
21064 do_neon_ld_dup ();
21065 break;
21066
21067 default:
21068 NEON_ENCODE (LANE, inst);
21069 do_neon_ld_st_lane ();
21070 }
21071
21072 /* L bit comes from bit mask. */
21073 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21074 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21075 inst.instruction |= inst.operands[1].reg << 16;
21076
21077 if (inst.operands[1].postind)
21078 {
21079 int postreg = inst.operands[1].imm & 0xf;
21080 constraint (!inst.operands[1].immisreg,
21081 _("post-index must be a register"));
21082 constraint (postreg == 0xd || postreg == 0xf,
21083 _("bad register for post-index"));
21084 inst.instruction |= postreg;
21085 }
21086 else
21087 {
21088 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21089 constraint (inst.relocs[0].exp.X_op != O_constant
21090 || inst.relocs[0].exp.X_add_number != 0,
21091 BAD_ADDR_MODE);
21092
21093 if (inst.operands[1].writeback)
21094 {
21095 inst.instruction |= 0xd;
21096 }
21097 else
21098 inst.instruction |= 0xf;
21099 }
21100
21101 if (thumb_mode)
21102 inst.instruction |= 0xf9000000;
21103 else
21104 inst.instruction |= 0xf4000000;
21105 }
21106
21107 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Shared encoder for the FP v8 scalar three-operand instructions
     (used by VSEL, VMAXNM/VMINNM).  */
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Double-precision operands set bit 8.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
21135
static void
do_vsel (void)
{
  /* Encode VSEL (FP conditional select); never predicated.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
21144
static void
do_vmaxnm (void)
{
  /* Encode VMAXNM/VMINNM: try the VFP scalar (FP v8) form first, then
     fall back to the Neon/MVE vector form.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21159
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Shared encoder for the VRINT family; MODE selects the rounding
     behaviour.  Tries the VFP scalar encoding first, then the Neon
     vector encoding.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Mode-specific opcode bits for the VFP encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (TRUE,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode goes in bits [9:7] for the Neon encoding; VRINTR
	 has no vector form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21251
static void
do_vrintx (void)
{
  /* VRINTX: encode via the shared VRINT helper, mode "x".  */
  do_vrint_1 (neon_cvt_mode_x);
}
21257
static void
do_vrintz (void)
{
  /* VRINTZ: encode via the shared VRINT helper, mode "z" (toward zero).  */
  do_vrint_1 (neon_cvt_mode_z);
}
21263
static void
do_vrintr (void)
{
  /* VRINTR: encode via the shared VRINT helper, mode "r".  */
  do_vrint_1 (neon_cvt_mode_r);
}
21269
static void
do_vrinta (void)
{
  /* VRINTA: encode via the shared VRINT helper, mode "a" (to nearest,
     ties away).  */
  do_vrint_1 (neon_cvt_mode_a);
}
21275
static void
do_vrintn (void)
{
  /* VRINTN: encode via the shared VRINT helper, mode "n" (to nearest,
     ties even).  */
  do_vrint_1 (neon_cvt_mode_n);
}
21281
static void
do_vrintp (void)
{
  /* VRINTP: encode via the shared VRINT helper, mode "p" (toward +inf).  */
  do_vrint_1 (neon_cvt_mode_p);
}
21287
static void
do_vrintm (void)
{
  /* VRINTM: encode via the shared VRINT helper, mode "m" (toward -inf).  */
  do_vrint_1 (neon_cvt_mode_m);
}
21293
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  /* Validate and pack a VCMLA scalar operand.  For 16-bit elements the
     index may be 0 or 1 and only registers 0-15 are usable; for 32-bit
     elements only index 0 is allowed.  Returns the packed register/index
     value, or 0 after reporting an error.  */
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned index = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      if (index < 2 && reg < 16)
	return reg | (index << 4);
    }
  else if (elsize == 32 && index == 0)
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
21308
static void
do_vcmla (void)
{
  /* Encode VCMLA (FP complex multiply-accumulate with rotation), for
     Armv8.3 Neon or MVE-FP.  The rotation immediate must be one of
     0, 90, 180 or 270.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as 0..3.  */
  rot /= 90;

  if (!check_simd_pred_availability (TRUE,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-element (scalar) form; not available in MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form; MVE only supports Q registers.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21368
static void
do_vcadd (void)
{
  /* Encode VCADD (complex add with rotation of 90 or 270 degrees), for
     Armv8.3 Neon or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE additionally allows integer element types, Q registers only.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      /* Floating-point form (Neon or MVE-FP).  */
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer form exists only in MVE.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21427
21428 /* Dot Product instructions encoding support. */
21429
static void
do_neon_dotproduct (int unsigned_p)
{
  /* Shared encoder for VSDOT/VUDOT; UNSIGNED_P selects the signedness.  */
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21484
21485 /* Dot Product instructions for signed integer. */
21486
static void
do_neon_dotproduct_s (void)
{
  /* VSDOT: signed dot product.  Plain call rather than "return <void
     expression>", which is an ISO C constraint violation.  */
  do_neon_dotproduct (0);
}
21492
21493 /* Dot Product instructions for unsigned integer. */
21494
static void
do_neon_dotproduct_u (void)
{
  /* VUDOT: unsigned dot product.  Plain call rather than "return <void
     expression>", which is an ISO C constraint violation.  */
  do_neon_dotproduct (1);
}
21500
static void
do_vusdot (void)
{
  /* Encode VUSDOT (mixed-sign dot product), vector or by-element form.  */
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand carries the lane index in its low nibble.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21528
static void
do_vsudot (void)
{
  /* Encode VSUDOT; only the by-element (scalar) form is handled here —
     note there is no vector-by-vector branch, unlike do_vusdot.  */
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand carries the lane index in its low nibble.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
}
21549
21550 static void
21551 do_vsmmla (void)
21552 {
21553 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21554 neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
21555
21556 set_pred_insn_type (OUTSIDE_PRED_INSN);
21557
21558 neon_three_args (1);
21559
21560 }
21561
21562 static void
21563 do_vummla (void)
21564 {
21565 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21566 neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
21567
21568 set_pred_insn_type (OUTSIDE_PRED_INSN);
21569
21570 neon_three_args (1);
21571
21572 }
21573
21574 /* Crypto v1 instructions. */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  /* Shared encoder for the two-operand crypto instructions (AES*, SHA1H,
     SHA*SU0/1).  OP fills bits [7:6], or -1 to leave them clear.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Thumb and Arm use different top bytes.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
21599
static void
do_crypto_3op_1 (int u, int op)
{
  /* Shared encoder for the three-operand crypto instructions (SHA1C/P/M,
     SHA1SU0, SHA256*).  U and OP select the specific operation.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
21614
static void
do_aese (void)
{
  /* AESE: crypto two-operand helper, 8-bit elements, op 0.  */
  do_crypto_2op_1 (N_8, 0);
}
21620
static void
do_aesd (void)
{
  /* AESD: crypto two-operand helper, 8-bit elements, op 1.  */
  do_crypto_2op_1 (N_8, 1);
}
21626
static void
do_aesmc (void)
{
  /* AESMC: crypto two-operand helper, 8-bit elements, op 2.  */
  do_crypto_2op_1 (N_8, 2);
}
21632
static void
do_aesimc (void)
{
  /* AESIMC: crypto two-operand helper, 8-bit elements, op 3.  */
  do_crypto_2op_1 (N_8, 3);
}
21638
static void
do_sha1c (void)
{
  /* SHA1C: crypto three-operand helper, u=0, op=0.  */
  do_crypto_3op_1 (0, 0);
}
21644
static void
do_sha1p (void)
{
  /* SHA1P: crypto three-operand helper, u=0, op=1.  */
  do_crypto_3op_1 (0, 1);
}
21650
static void
do_sha1m (void)
{
  /* SHA1M: crypto three-operand helper, u=0, op=2.  */
  do_crypto_3op_1 (0, 2);
}
21656
static void
do_sha1su0 (void)
{
  /* SHA1SU0: crypto three-operand helper, u=0, op=3.  */
  do_crypto_3op_1 (0, 3);
}
21662
static void
do_sha256h (void)
{
  /* SHA256H: crypto three-operand helper, u=1, op=0.  */
  do_crypto_3op_1 (1, 0);
}
21668
static void
do_sha256h2 (void)
{
  /* SHA256H2: crypto three-operand helper, u=1, op=1.  */
  do_crypto_3op_1 (1, 1);
}
21674
21675 static void
21676 do_sha256su1 (void)
21677 {
21678 do_crypto_3op_1 (1, 2);
21679 }
21680
21681 static void
21682 do_sha1h (void)
21683 {
21684 do_crypto_2op_1 (N_32, -1);
21685 }
21686
21687 static void
21688 do_sha1su1 (void)
21689 {
21690 do_crypto_2op_1 (N_32, 0);
21691 }
21692
21693 static void
21694 do_sha256su0 (void)
21695 {
21696 do_crypto_2op_1 (N_32, 1);
21697 }
21698
21699 static void
21700 do_crc32_1 (unsigned int poly, unsigned int sz)
21701 {
21702 unsigned int Rd = inst.operands[0].reg;
21703 unsigned int Rn = inst.operands[1].reg;
21704 unsigned int Rm = inst.operands[2].reg;
21705
21706 set_pred_insn_type (OUTSIDE_PRED_INSN);
21707 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
21708 inst.instruction |= LOW4 (Rn) << 16;
21709 inst.instruction |= LOW4 (Rm);
21710 inst.instruction |= sz << (thumb_mode ? 4 : 21);
21711 inst.instruction |= poly << (thumb_mode ? 20 : 9);
21712
21713 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
21714 as_warn (UNPRED_REG ("r15"));
21715 }
21716
/* CRC32B: CRC-32 accumulate of a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC-32 accumulate of a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC-32 accumulate of a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC-32C accumulate of a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC-32C accumulate of a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC-32C accumulate of a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
21752
/* VJCVT: convert double precision to signed 32-bit integer (the type
   check demands an S32 destination and F64 source).  Requires the Armv8
   VFP extension.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
21762
/* VDOT: BFloat16 dot product, in both the vector and the indexed-scalar
   forms.  Requires the Armv8 Advanced SIMD extension.  */
static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      /* Indexed form: the last operand is a scalar Dm[index].  */
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The parsed scalar operand packs the lane index in its low nibble
	 and the register number above it; split them back apart.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      /* Lane index goes in bit 5.  */
      inst.instruction |= (index << 5);
    }
  else
    {
      /* Plain three-register vector form.  */
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21790
21791 static void
21792 do_vmmla (void)
21793 {
21794 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21795 neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
21796
21797 constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
21798 set_pred_insn_type (OUTSIDE_PRED_INSN);
21799
21800 neon_three_args (1);
21801 }
21802
21803 \f
21804 /* Overall per-instruction processing. */
21805
21806 /* We need to be able to fix up arbitrary expressions in some statements.
21807 This is so that we can handle symbols that are an arbitrary distance from
21808 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
21809 which returns part of an address in a form which will be valid for
21810 a data instruction. We do this by pushing the expression into a symbol
21811 in the expr_section, and creating a fix for that. */
21812
/* FRAG/WHERE locate the fixup, SIZE is its width in bytes, EXP is the
   expression being fixed up, PC_REL says whether it is pc-relative and
   RELOC is the BFD relocation code to apply.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  /* 16 + 8 bytes: "*ABS*0x" + up to 16 hex digits + NUL.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite EXP in place as a reference to that symbol and fall
	     through to the symbolic case.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Any other expression kind: wrap it in an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
21866
/* Create a frag for an instruction requiring relaxation.  */
21868 static void
21869 output_relax_insn (void)
21870 {
21871 char * to;
21872 symbolS *sym;
21873 int offset;
21874
21875 /* The size of the instruction is unknown, so tie the debug info to the
21876 start of the instruction. */
21877 dwarf2_emit_insn (0);
21878
21879 switch (inst.relocs[0].exp.X_op)
21880 {
21881 case O_symbol:
21882 sym = inst.relocs[0].exp.X_add_symbol;
21883 offset = inst.relocs[0].exp.X_add_number;
21884 break;
21885 case O_constant:
21886 sym = NULL;
21887 offset = inst.relocs[0].exp.X_add_number;
21888 break;
21889 default:
21890 sym = make_expr_symbol (&inst.relocs[0].exp);
21891 offset = 0;
21892 break;
21893 }
21894 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
21895 inst.relax, sym, offset, NULL/*offset, opcode*/);
21896 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
21897 }
21898
21899 /* Write a 32-bit thumb instruction to buf. */
21900 static void
21901 put_thumb32_insn (char * buf, unsigned long insn)
21902 {
21903 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
21904 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
21905 }
21906
/* Emit the instruction assembled in INST into the current frag, create
   any fixups it needs, and record DWARF line info.  STR is the original
   source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions are emitted via a variant frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-width case: the same 32-bit pattern is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for every pending relocation on this instruction.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
21957
21958 static char *
21959 output_it_inst (int cond, int mask, char * to)
21960 {
21961 unsigned long instruction = 0xbf00;
21962
21963 mask &= 0xf;
21964 instruction |= mask;
21965 instruction |= cond << 4;
21966
21967 if (to == NULL)
21968 {
21969 to = frag_more (2);
21970 #ifdef OBJ_ELF
21971 dwarf2_emit_insn (2);
21972 #endif
21973 }
21974
21975 md_number_to_chars (to, instruction, 2);
21976
21977 return to;
21978 }
21979
/* Tag values used in struct asm_opcode's tag field.  The tag tells
   opcode_lookup () where, if anywhere, a condition affix may appear in
   the mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22014
22015 /* Subroutine of md_assemble, responsible for looking up the primary
22016 opcode from the mnemonic the user wrote. STR points to the
22017 beginning of the mnemonic.
22018
22019 This is not simply a hash table lookup, because of conditional
22020 variants. Most instructions have conditional variants, which are
22021 expressed with a _conditional affix_ to the mnemonic. If we were
22022 to encode each conditional variant as a literal string in the opcode
22023 table, it would have approximately 20,000 entries.
22024
22025 Most mnemonics take this affix as a suffix, and in unified syntax,
22026 'most' is upgraded to 'all'. However, in the divided syntax, some
22027 instructions take the affix as an infix, notably the s-variants of
22028 the arithmetic instructions. Of those instructions, all but six
22029 have the infix appear after the third character of the mnemonic.
22030
22031 Accordingly, the algorithm for looking up primary opcodes given
22032 an identifier is:
22033
22034 1. Look up the identifier in the opcode table.
22035 If we find a match, go to step U.
22036
22037 2. Look up the last two characters of the identifier in the
22038 conditions table. If we find a match, look up the first N-2
22039 characters of the identifier in the opcode table. If we
22040 find a match, go to step CE.
22041
22042 3. Look up the fourth and fifth characters of the identifier in
22043 the conditions table. If we find a match, extract those
22044 characters from the identifier, and look up the remaining
22045 characters in the opcode table. If we find a match, go
22046 to step CM.
22047
22048 4. Fail.
22049
22050 U. Examine the tag field of the opcode structure, in case this is
22051 one of the six instructions with its conditional infix in an
22052 unusual place. If it is, the tag tells us where to find the
22053 infix; look it up in the conditions table and set inst.cond
22054 accordingly. Otherwise, this is an unconditional instruction.
22055 Again set inst.cond accordingly. Return the opcode structure.
22056
22057 CE. Examine the tag field to make sure this is an instruction that
22058 should receive a conditional suffix. If it is not, fail.
22059 Otherwise, set inst.cond from the suffix we already looked up,
22060 and return the opcode structure.
22061
22062 CM. Examine the tag field to make sure this is an instruction that
22063 should receive a conditional infix after the third character.
22064 If it is not, fail. Otherwise, undo the edits to the current
22065 line of input and proceed as for case CE. */
22066
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Cannot have a one-character vector-predication suffix on a
	 mnemonic of fewer than two characters.  */
      if (end - base < 2)
	return NULL;
      affix = end - 1;
      cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
      /* If this opcode can not be vector predicated then don't accept it with a
	 vector predication code.  */
      if (opcode && !opcode->mayBeVecPred)
	opcode = NULL;
    }
  /* No MVE match (or MVE unavailable): try a two-character scalar
     condition suffix.  */
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
							affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two condition characters so the base mnemonic
     can be looked up, then restore the input line exactly as it was.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22241
22242 /* This function generates an initial IT instruction, leaving its block
22243 virtually open for the new instructions. Eventually,
22244 the mask will be updated by now_pred_add_mask () each time
22245 a new instruction needs to be included in the IT block.
22246 Finally, the block is closed with close_automatic_it_block ().
22247 The block closure can be requested either from md_assemble (),
22248 a tencode (), or due to a label hook. */
22249
static void
new_automatic_it_block (int cond)
{
  now_pred.state = AUTOMATIC_PRED_BLOCK;
  /* Initial mask for a block of length one; now_pred_add_mask () refines
     it as further instructions join the block.  */
  now_pred.mask = 0x18;
  now_pred.cc = cond;
  now_pred.block_length = 1;
  /* The generated IT instruction is itself a Thumb instruction.  */
  mapping_state (MAP_THUMB);
  /* Remember where the IT was emitted so its mask can be rewritten.  */
  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
  now_pred.warn_deprecated = FALSE;
  now_pred.insn_cond = TRUE;
}
22262
22263 /* Close an automatic IT block.
22264 See comments in new_automatic_it_block (). */
22265
static void
close_automatic_it_block (void)
{
  /* 0x10 is the mask of a finished block (cf. the is_last test in
     handle_pred_state); zero length means no block is open.  */
  now_pred.mask = 0x10;
  now_pred.block_length = 0;
}
22272
22273 /* Update the mask of the current automatically-generated IT
22274 instruction. See comments in new_automatic_it_block (). */
22275
22276 static void
22277 now_pred_add_mask (int cond)
22278 {
22279 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
22280 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
22281 | ((bitvalue) << (nbit)))
22282 const int resulting_bit = (cond & 1);
22283
22284 now_pred.mask &= 0xf;
22285 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22286 resulting_bit,
22287 (5 - now_pred.block_length));
22288 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22289 1,
22290 ((5 - now_pred.block_length) - 1));
22291 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
22292
22293 #undef CLEAR_BIT
22294 #undef SET_BIT_VALUE
22295 }
22296
/* The IT blocks handling machinery is accessed through these functions:
22298 it_fsm_pre_encode () from md_assemble ()
22299 set_pred_insn_type () optional, from the tencode functions
22300 set_pred_insn_type_last () ditto
22301 in_pred_block () ditto
22302 it_fsm_post_encode () from md_assemble ()
22303 force_automatic_it_block_close () from label handling functions
22304
22305 Rationale:
22306 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22307 initializing the IT insn type with a generic initial value depending
22308 on the inst.condition.
22309 2) During the tencode function, two things may happen:
22310 a) The tencode function overrides the IT insn type by
22311 calling either set_pred_insn_type (type) or
22312 set_pred_insn_type_last ().
22313 b) The tencode function queries the IT block state by
22314 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22315
22316 Both set_pred_insn_type and in_pred_block run the internal FSM state
22317 handling function (handle_pred_state), because: a) setting the IT insn
22318 type may incur in an invalid state (exiting the function),
22319 and b) querying the state requires the FSM to be updated.
22320 Specifically we want to avoid creating an IT block for conditional
22321 branches, so it_fsm_pre_encode is actually a guess and we can't
22322 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
22324 Because of this, if set_pred_insn_type and in_pred_block have to be
22325 used, set_pred_insn_type has to be called first.
22326
22327 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22328 that determines the insn IT type depending on the inst.cond code.
22329 When a tencode () routine encodes an instruction that can be
22330 either outside an IT block, or, in the case of being inside, has to be
22331 the last one, set_pred_insn_type_last () will determine the proper
22332 IT instruction type based on the inst.cond code. Otherwise,
22333 set_pred_insn_type can be called for overriding that logic or
22334 for covering other cases.
22335
22336 Calling handle_pred_state () may not transition the IT block state to
22337 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22338 still queried. Instead, if the FSM determines that the state should
22339 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22340 after the tencode () function: that's what it_fsm_post_encode () does.
22341
22342 Since in_pred_block () calls the state handling function to get an
22343 updated state, an error may occur (due to invalid insns combination).
22344 In that case, inst.error is set.
22345 Therefore, inst.error has to be checked after the execution of
22346 the tencode () routine.
22347
22348 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22349 any pending state change (if any) that didn't take place in
22350 handle_pred_state () as explained above. */
22351
22352 static void
22353 it_fsm_pre_encode (void)
22354 {
22355 if (inst.cond != COND_ALWAYS)
22356 inst.pred_insn_type = INSIDE_IT_INSN;
22357 else
22358 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22359
22360 now_pred.state_handled = 0;
22361 }
22362
22363 /* IT state FSM handling function. */
22364 /* MVE instructions and non-MVE instructions are handled differently because of
22365 the introduction of VPT blocks.
22366 Specifications say that any non-MVE instruction inside a VPT block is
22367 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
22368 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
22369 few exceptions we have MVE_UNPREDICABLE_INSN.
22370 The error messages provided depending on the different combinations possible
22371 are described in the cases below:
22372 For 'most' MVE instructions:
22373 1) In an IT block, with an IT code: syntax error
22374 2) In an IT block, with a VPT code: error: must be in a VPT block
22375 3) In an IT block, with no code: warning: UNPREDICTABLE
22376 4) In a VPT block, with an IT code: syntax error
22377 5) In a VPT block, with a VPT code: OK!
22378 6) In a VPT block, with no code: error: missing code
22379 7) Outside a pred block, with an IT code: error: syntax error
22380 8) Outside a pred block, with a VPT code: error: should be in a VPT block
22381 9) Outside a pred block, with no code: OK!
22382 For non-MVE instructions:
22383 10) In an IT block, with an IT code: OK!
22384 11) In an IT block, with a VPT code: syntax error
22385 12) In an IT block, with no code: error: missing code
22386 13) In a VPT block, with an IT code: error: should be in an IT block
22387 14) In a VPT block, with a VPT code: syntax error
22388 15) In a VPT block, with no code: UNPREDICTABLE
22389 16) Outside a pred block, with an IT code: error: should be in an IT block
22390 17) Outside a pred block, with a VPT code: syntax error
22391 18) Outside a pred block, with no code: OK!
22392 */
22393
22394
22395 static int
22396 handle_pred_state (void)
22397 {
22398 now_pred.state_handled = 1;
22399 now_pred.insn_cond = FALSE;
22400
22401 switch (now_pred.state)
22402 {
22403 case OUTSIDE_PRED_BLOCK:
22404 switch (inst.pred_insn_type)
22405 {
22406 case MVE_UNPREDICABLE_INSN:
22407 case MVE_OUTSIDE_PRED_INSN:
22408 if (inst.cond < COND_ALWAYS)
22409 {
22410 /* Case 7: Outside a pred block, with an IT code: error: syntax
22411 error. */
22412 inst.error = BAD_SYNTAX;
22413 return FAIL;
22414 }
22415 /* Case 9: Outside a pred block, with no code: OK! */
22416 break;
22417 case OUTSIDE_PRED_INSN:
22418 if (inst.cond > COND_ALWAYS)
22419 {
22420 /* Case 17: Outside a pred block, with a VPT code: syntax error.
22421 */
22422 inst.error = BAD_SYNTAX;
22423 return FAIL;
22424 }
22425 /* Case 18: Outside a pred block, with no code: OK! */
22426 break;
22427
22428 case INSIDE_VPT_INSN:
22429 /* Case 8: Outside a pred block, with a VPT code: error: should be in
22430 a VPT block. */
22431 inst.error = BAD_OUT_VPT;
22432 return FAIL;
22433
22434 case INSIDE_IT_INSN:
22435 case INSIDE_IT_LAST_INSN:
22436 if (inst.cond < COND_ALWAYS)
22437 {
22438 /* Case 16: Outside a pred block, with an IT code: error: should
22439 be in an IT block. */
22440 if (thumb_mode == 0)
22441 {
22442 if (unified_syntax
22443 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
22444 as_tsktsk (_("Warning: conditional outside an IT block"\
22445 " for Thumb."));
22446 }
22447 else
22448 {
22449 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
22450 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
22451 {
22452 /* Automatically generate the IT instruction. */
22453 new_automatic_it_block (inst.cond);
22454 if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
22455 close_automatic_it_block ();
22456 }
22457 else
22458 {
22459 inst.error = BAD_OUT_IT;
22460 return FAIL;
22461 }
22462 }
22463 break;
22464 }
22465 else if (inst.cond > COND_ALWAYS)
22466 {
22467 /* Case 17: Outside a pred block, with a VPT code: syntax error.
22468 */
22469 inst.error = BAD_SYNTAX;
22470 return FAIL;
22471 }
22472 else
22473 gas_assert (0);
22474 case IF_INSIDE_IT_LAST_INSN:
22475 case NEUTRAL_IT_INSN:
22476 break;
22477
22478 case VPT_INSN:
22479 if (inst.cond != COND_ALWAYS)
22480 first_error (BAD_SYNTAX);
22481 now_pred.state = MANUAL_PRED_BLOCK;
22482 now_pred.block_length = 0;
22483 now_pred.type = VECTOR_PRED;
22484 now_pred.cc = 0;
22485 break;
22486 case IT_INSN:
22487 now_pred.state = MANUAL_PRED_BLOCK;
22488 now_pred.block_length = 0;
22489 now_pred.type = SCALAR_PRED;
22490 break;
22491 }
22492 break;
22493
22494 case AUTOMATIC_PRED_BLOCK:
22495 /* Three things may happen now:
22496 a) We should increment current it block size;
22497 b) We should close current it block (closing insn or 4 insns);
22498 c) We should close current it block and start a new one (due
22499 to incompatible conditions or
22500 4 insns-length block reached). */
22501
22502 switch (inst.pred_insn_type)
22503 {
22504 case INSIDE_VPT_INSN:
22505 case VPT_INSN:
22506 case MVE_UNPREDICABLE_INSN:
22507 case MVE_OUTSIDE_PRED_INSN:
22508 gas_assert (0);
22509 case OUTSIDE_PRED_INSN:
22510 /* The closure of the block shall happen immediately,
22511 so any in_pred_block () call reports the block as closed. */
22512 force_automatic_it_block_close ();
22513 break;
22514
22515 case INSIDE_IT_INSN:
22516 case INSIDE_IT_LAST_INSN:
22517 case IF_INSIDE_IT_LAST_INSN:
22518 now_pred.block_length++;
22519
22520 if (now_pred.block_length > 4
22521 || !now_pred_compatible (inst.cond))
22522 {
22523 force_automatic_it_block_close ();
22524 if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
22525 new_automatic_it_block (inst.cond);
22526 }
22527 else
22528 {
22529 now_pred.insn_cond = TRUE;
22530 now_pred_add_mask (inst.cond);
22531 }
22532
22533 if (now_pred.state == AUTOMATIC_PRED_BLOCK
22534 && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
22535 || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
22536 close_automatic_it_block ();
22537 break;
22538
22539 case NEUTRAL_IT_INSN:
22540 now_pred.block_length++;
22541 now_pred.insn_cond = TRUE;
22542
22543 if (now_pred.block_length > 4)
22544 force_automatic_it_block_close ();
22545 else
22546 now_pred_add_mask (now_pred.cc & 1);
22547 break;
22548
22549 case IT_INSN:
22550 close_automatic_it_block ();
22551 now_pred.state = MANUAL_PRED_BLOCK;
22552 break;
22553 }
22554 break;
22555
22556 case MANUAL_PRED_BLOCK:
22557 {
22558 int cond, is_last;
22559 if (now_pred.type == SCALAR_PRED)
22560 {
22561 /* Check conditional suffixes. */
22562 cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
22563 now_pred.mask <<= 1;
22564 now_pred.mask &= 0x1f;
22565 is_last = (now_pred.mask == 0x10);
22566 }
22567 else
22568 {
22569 now_pred.cc ^= (now_pred.mask >> 4);
22570 cond = now_pred.cc + 0xf;
22571 now_pred.mask <<= 1;
22572 now_pred.mask &= 0x1f;
22573 is_last = now_pred.mask == 0x10;
22574 }
22575 now_pred.insn_cond = TRUE;
22576
22577 switch (inst.pred_insn_type)
22578 {
22579 case OUTSIDE_PRED_INSN:
22580 if (now_pred.type == SCALAR_PRED)
22581 {
22582 if (inst.cond == COND_ALWAYS)
22583 {
22584 /* Case 12: In an IT block, with no code: error: missing
22585 code. */
22586 inst.error = BAD_NOT_IT;
22587 return FAIL;
22588 }
22589 else if (inst.cond > COND_ALWAYS)
22590 {
22591 /* Case 11: In an IT block, with a VPT code: syntax error.
22592 */
22593 inst.error = BAD_SYNTAX;
22594 return FAIL;
22595 }
22596 else if (thumb_mode)
22597 {
22598 /* This is for some special cases where a non-MVE
22599 instruction is not allowed in an IT block, such as cbz,
22600 but are put into one with a condition code.
22601 You could argue this should be a syntax error, but we
22602 gave the 'not allowed in IT block' diagnostic in the
22603 past so we will keep doing so. */
22604 inst.error = BAD_NOT_IT;
22605 return FAIL;
22606 }
22607 break;
22608 }
22609 else
22610 {
22611 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
22612 as_tsktsk (MVE_NOT_VPT);
22613 return SUCCESS;
22614 }
22615 case MVE_OUTSIDE_PRED_INSN:
22616 if (now_pred.type == SCALAR_PRED)
22617 {
22618 if (inst.cond == COND_ALWAYS)
22619 {
22620 /* Case 3: In an IT block, with no code: warning:
22621 UNPREDICTABLE. */
22622 as_tsktsk (MVE_NOT_IT);
22623 return SUCCESS;
22624 }
22625 else if (inst.cond < COND_ALWAYS)
22626 {
22627 /* Case 1: In an IT block, with an IT code: syntax error.
22628 */
22629 inst.error = BAD_SYNTAX;
22630 return FAIL;
22631 }
22632 else
22633 gas_assert (0);
22634 }
22635 else
22636 {
22637 if (inst.cond < COND_ALWAYS)
22638 {
22639 /* Case 4: In a VPT block, with an IT code: syntax error.
22640 */
22641 inst.error = BAD_SYNTAX;
22642 return FAIL;
22643 }
22644 else if (inst.cond == COND_ALWAYS)
22645 {
22646 /* Case 6: In a VPT block, with no code: error: missing
22647 code. */
22648 inst.error = BAD_NOT_VPT;
22649 return FAIL;
22650 }
22651 else
22652 {
22653 gas_assert (0);
22654 }
22655 }
22656 case MVE_UNPREDICABLE_INSN:
22657 as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
22658 return SUCCESS;
22659 case INSIDE_IT_INSN:
22660 if (inst.cond > COND_ALWAYS)
22661 {
22662 /* Case 11: In an IT block, with a VPT code: syntax error. */
22663 /* Case 14: In a VPT block, with a VPT code: syntax error. */
22664 inst.error = BAD_SYNTAX;
22665 return FAIL;
22666 }
22667 else if (now_pred.type == SCALAR_PRED)
22668 {
22669 /* Case 10: In an IT block, with an IT code: OK! */
22670 if (cond != inst.cond)
22671 {
22672 inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
22673 BAD_VPT_COND;
22674 return FAIL;
22675 }
22676 }
22677 else
22678 {
22679 /* Case 13: In a VPT block, with an IT code: error: should be
22680 in an IT block. */
22681 inst.error = BAD_OUT_IT;
22682 return FAIL;
22683 }
22684 break;
22685
22686 case INSIDE_VPT_INSN:
22687 if (now_pred.type == SCALAR_PRED)
22688 {
22689 /* Case 2: In an IT block, with a VPT code: error: must be in a
22690 VPT block. */
22691 inst.error = BAD_OUT_VPT;
22692 return FAIL;
22693 }
22694 /* Case 5: In a VPT block, with a VPT code: OK! */
22695 else if (cond != inst.cond)
22696 {
22697 inst.error = BAD_VPT_COND;
22698 return FAIL;
22699 }
22700 break;
22701 case INSIDE_IT_LAST_INSN:
22702 case IF_INSIDE_IT_LAST_INSN:
22703 if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
22704 {
22705 /* Case 4: In a VPT block, with an IT code: syntax error. */
22706 /* Case 11: In an IT block, with a VPT code: syntax error. */
22707 inst.error = BAD_SYNTAX;
22708 return FAIL;
22709 }
22710 else if (cond != inst.cond)
22711 {
22712 inst.error = BAD_IT_COND;
22713 return FAIL;
22714 }
22715 if (!is_last)
22716 {
22717 inst.error = BAD_BRANCH;
22718 return FAIL;
22719 }
22720 break;
22721
22722 case NEUTRAL_IT_INSN:
22723 /* The BKPT instruction is unconditional even in a IT or VPT
22724 block. */
22725 break;
22726
22727 case IT_INSN:
22728 if (now_pred.type == SCALAR_PRED)
22729 {
22730 inst.error = BAD_IT_IT;
22731 return FAIL;
22732 }
22733 /* fall through. */
22734 case VPT_INSN:
22735 if (inst.cond == COND_ALWAYS)
22736 {
22737 /* Executing a VPT/VPST instruction inside an IT block or a
22738 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
22739 */
22740 if (now_pred.type == SCALAR_PRED)
22741 as_tsktsk (MVE_NOT_IT);
22742 else
22743 as_tsktsk (MVE_NOT_VPT);
22744 return SUCCESS;
22745 }
22746 else
22747 {
22748 /* VPT/VPST do not accept condition codes. */
22749 inst.error = BAD_SYNTAX;
22750 return FAIL;
22751 }
22752 }
22753 }
22754 break;
22755 }
22756
22757 return SUCCESS;
22758 }
22759
/* Describes one class of 16-bit Thumb instructions whose use inside an
   IT block is performance deprecated in ARMv8 (see depr_it_insns and
   it_fsm_post_encode below).  */
struct depr_insn_mask
{
  /* Encoding bits identifying the instruction class, after masking.  */
  unsigned long pattern;
  /* Which bits of the instruction encoding to compare against PATTERN.
     A MASK of zero terminates the table.  */
  unsigned long mask;
  /* Translatable, human-readable description used in the diagnostic.  */
  const char* description;
};
22766
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Matched in it_fsm_post_encode against inst.instruction; the
   { 0, 0, NULL } entry is the terminating sentinel.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
22781
/* Run after an instruction has been encoded: make sure the IT/VPT state
   machine has processed it, emit the ARMv8-A/R performance-deprecation
   warnings for instructions inside an IT block, and close the block when
   its last slot has been consumed.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_pred.state_handled)
    handle_pred_state ();

  /* Warn at most once per IT block (warn_deprecated latches), and only
     for A/R-profile ARMv8 cores.  */
  if (now_pred.insn_cond
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit instruction: look it up in the deprecated-class table
	     (terminated by a zero mask).  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
    }

  /* Mask value 0x10 means all condition slots of the block are used up;
     return the FSM to the outside-block state.  */
  is_last = (now_pred.mask == 0x10);
  if (is_last)
    {
      now_pred.state = OUTSIDE_PRED_BLOCK;
      now_pred.mask = 0;
    }
}
22838
22839 static void
22840 force_automatic_it_block_close (void)
22841 {
22842 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
22843 {
22844 close_automatic_it_block ();
22845 now_pred.state = OUTSIDE_PRED_BLOCK;
22846 now_pred.mask = 0;
22847 }
22848 }
22849
22850 static int
22851 in_pred_block (void)
22852 {
22853 if (!now_pred.state_handled)
22854 handle_pred_state ();
22855
22856 return now_pred.state != OUTSIDE_PRED_BLOCK;
22857 }
22858
22859 /* Whether OPCODE only has T32 encoding. Since this function is only used by
22860 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
22861 here, hence the "known" in the function name. */
22862
22863 static bfd_boolean
22864 known_t32_only_insn (const struct asm_opcode *opcode)
22865 {
22866 /* Original Thumb-1 wide instruction. */
22867 if (opcode->tencode == do_t_blx
22868 || opcode->tencode == do_t_branch23
22869 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
22870 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
22871 return TRUE;
22872
22873 /* Wide-only instruction added to ARMv8-M Baseline. */
22874 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
22875 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
22876 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
22877 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
22878 return TRUE;
22879
22880 return FALSE;
22881 }
22882
22883 /* Whether wide instruction variant can be used if available for a valid OPCODE
22884 in ARCH. */
22885
22886 static bfd_boolean
22887 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
22888 {
22889 if (known_t32_only_insn (opcode))
22890 return TRUE;
22891
22892 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
22893 of variant T3 of B.W is checked in do_t_branch. */
22894 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22895 && opcode->tencode == do_t_branch)
22896 return TRUE;
22897
22898 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
22899 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
22900 && opcode->tencode == do_t_mov_cmp
22901 /* Make sure CMP instruction is not affected. */
22902 && opcode->aencode == do_mov)
22903 return TRUE;
22904
22905 /* Wide instruction variants of all instructions with narrow *and* wide
22906 variants become available with ARMv6t2. Other opcodes are either
22907 narrow-only or wide-only and are thus available if OPCODE is valid. */
22908 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
22909 return TRUE;
22910
22911 /* OPCODE with narrow only instruction variant or wide variant not
22912 available. */
22913 return FALSE;
22914 }
22915
/* Main per-statement entry point: look STR up in the opcode hash, check
   it against the selected CPU/architecture, parse its operands, encode
   it (Thumb or ARM as appropriate) and hand the result to output_inst.
   Also resolves ".req"/".dn"/".qn" register aliases when STR is not an
   instruction.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state before encoding.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff would be the first halfword of a 32-bit
	     encoding; a complete instruction is never in that range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
23110
/* End-of-assembly sanity check: warn about any explicitly-opened IT or
   VPT/VPST block that was never completed.  On ELF the per-section
   predication state is checked for every output section; otherwise only
   the global state is checked.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	/* NOTE(review): the state tested above is per-section, but the
	   wording below is chosen from the global now_pred.type --
	   confirm whether the section's saved predication type should
	   be used instead.  */
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
23138
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each source line: forget the label remembered
   from the previous line so md_assemble only re-aligns labels that
   belong to the current statement.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
23146
/* Called whenever a label SYM is defined: remember it for md_assemble,
   tag it with the current Thumb/interwork state, close any implicit IT
   block, optionally mark it as a Thumb function, and emit its DWARF
   line info.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any assembler-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
23205
/* In Thumb mode, recognise a "data:" marker immediately after the
   current input character.  When present, the current character is
   rewritten to '/' and the input is NUL-terminated after "data",
   producing a "name/data" symbol (the suffix is stripped again by
   arm_canonicalize_symbol_name below).  Returns TRUE when the marker
   was consumed.  NOTE(review): presumably invoked from the generic
   label scanner with input_line_pointer positioned on the character
   preceding the marker -- confirm against the tc.h hook definition.  */
bfd_boolean
arm_data_in_code (void)
{
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return TRUE;
    }

  return FALSE;
}
23219
23220 char *
23221 arm_canonicalize_symbol_name (char * name)
23222 {
23223 int len;
23224
23225 if (thumb_mode && (len = strlen (name)) > 5
23226 && streq (name + len - 5, "/data"))
23227 *(name + len - 5) = 0;
23228
23229 return name;
23230 }
23231 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* Helper macros for building reg_names entries.  REGDEF expands to one
   reg_entry initializer; REGNUM/REGNUM2 derive the name from a prefix
   and number (REGNUM2 doubling the number, used for Q registers which
   alias D register pairs); REGSET/REGSETH/REGSET2 expand to 16 entries
   each; SPLRBANK builds the LR/SP/SPSR trio for one banked mode.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only these three helper macros are undefined here;
   REGNUM2, REGSETH, REGSET2 and SPLRBANK remain defined -- confirm
   nothing later in the file relies on (or collides with) them.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
23389
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every non-empty combination (in every
   order) of the f, s, x and c field letters is listed explicitly so a
   simple string lookup suffices.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23468
/* Table of V7M psr names.  The numeric values appear to be the MSR/MRS
   special-register encodings (0x8x/0x9x being the Security Extension
   "_NS" views) -- confirm against the M-profile architecture manual.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
23499
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
23511
/* Table of all explicit relocation names, as written in operands like
   "ldr r0, =sym(GOT)".  Every relocation is listed in a lower-case and
   an upper-case spelling mapping to the same BFD relocation code.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fixed a typo here: the upper-case spelling was "GOTTPOFF_FDIC"
     (missing the 'P'), which broke the lower/upper-case pairing every
     other entry follows and made the documented upper-case name
     unrecognisable.  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
23545
/* Table of all conditional affixes.  The values are the standard ARM
   condition-code field encodings (0x0 - 0xe); "hs", "ul" and "lo" are
   accepted synonyms for "cs"/"cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Vector (VPT-block) affixes "t"/"e"; the values 0xf/0x10 lie outside
   the scalar condition-code range above so the two kinds can be told
   apart (cf. the inst.cond > COND_ALWAYS tests elsewhere).  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
23570
/* Expand to a lower-case and an upper-case entry for one barrier option,
   both mapping to the same 4-bit CODE and gated on feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DSB/DMB/ISB barrier option names.  The "ld" variants need
   ARMv8; the rest only need the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
23596
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  Each expands to the
   brace-enclosed operand-list initializer for a table entry, prefixing
   every argument with OP_.  N.B. In all cases other than OPS0, the
   trailing OP_stop comes from default zero-initialization of the
   unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
23621
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expands to one initializer whose
   fields are, in order: name, operand list, suffix/infix tag, ARM opcode
   value, Thumb opcode value, ARM feature set, Thumb feature set, ARM
   encoder, Thumb encoder, and a final flag (0 here; set to 1 by the
   MVE-capable variants further down).  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3, but the infix form is tagged as deprecated
   (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* Numeric-Thumb-opcode and T_MNEM_xyz variants, as for TCE/tCE above.  */
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
23653
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
23673
/* ARM-only variants of all the above.  The Thumb opcode, feature set and
   encoder slots are left as 0/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As CE, but takes a bare (unquoted) mnemonic and stringizes it.  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  The ARM opcode, feature set and
   encoder slots are left as 0/NULL.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23705
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the Thumb
   opcode is the ARM opcode with 0xe pasted in front, and the same encoder
   and feature set are used for both.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  Unlike
   cCE, the Thumb slot uses THUMB_VARIANT so the two modes can be gated on
   different features.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
23726
/* One entry of a conditional-infix mnemonic: M1 and M3 are the string
   halves of the name, M2 the (possibly empty) condition spelled between
   them.  When M2 is empty, sizeof (#M2) == 1 (just the NUL), so the tag
   becomes OT_odd_infix_unc; otherwise the infix position is recorded in
   the tag as an offset from OT_odd_infix_0.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to entries for the bare mnemonic plus every conditional infix.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional; UF additionally bears 0xF in the condition
   field (OT_unconditionalF).  Both stringize a bare mnemonic.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
23758
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  The final MVE_P flag marks whether the mnemonic may also be
   vector-predicated.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Insn with an optional-F conditional suffix, dispatched through an
   M_MNEM (MVE) enumerator and marked as MVE-predicable (final field 1).  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets a table entry pass 0 as its encode-function argument: do_##0
   expands to do_0, i.e. a null encoder pointer.  */
#define do_0 0
23832
23833 static const struct asm_opcode insns[] =
23834 {
23835 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
23836 #define THUMB_VARIANT & arm_ext_v4t
23837 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
23838 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
23839 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
23840 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
23841 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
23842 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
23843 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
23844 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
23845 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
23846 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
23847 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
23848 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
23849 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
23850 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
23851 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
23852 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
23853
23854 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
23855 for setting PSR flag bits. They are obsolete in V6 and do not
23856 have Thumb equivalents. */
23857 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
23858 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
23859 CL("tstp", 110f000, 2, (RR, SH), cmp),
23860 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
23861 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
23862 CL("cmpp", 150f000, 2, (RR, SH), cmp),
23863 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
23864 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
23865 CL("cmnp", 170f000, 2, (RR, SH), cmp),
23866
23867 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
23868 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
23869 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
23870 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
23871
23872 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
23873 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23874 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
23875 OP_RRnpc),
23876 OP_ADDRGLDR),ldst, t_ldst),
23877 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
23878
23879 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23880 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23881 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23882 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23883 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23884 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23885
23886 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
23887 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
23888
23889 /* Pseudo ops. */
23890 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
23891 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
23892 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
23893 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
23894
23895 /* Thumb-compatibility pseudo ops. */
23896 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
23897 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
23898 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
23899 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
23900 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
23901 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
23902 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
23903 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
23904 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
23905 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
23906 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
23907 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
23908
23909 /* These may simplify to neg. */
23910 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
23911 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
23912
23913 #undef THUMB_VARIANT
23914 #define THUMB_VARIANT & arm_ext_os
23915
23916 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
23917 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
23918
23919 #undef THUMB_VARIANT
23920 #define THUMB_VARIANT & arm_ext_v6
23921
23922 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
23923
23924 /* V1 instructions with no Thumb analogue prior to V6T2. */
23925 #undef THUMB_VARIANT
23926 #define THUMB_VARIANT & arm_ext_v6t2
23927
23928 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
23929 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
23930 CL("teqp", 130f000, 2, (RR, SH), cmp),
23931
23932 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23933 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23934 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
23935 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
23936
23937 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23938 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23939
23940 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23941 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
23942
23943 /* V1 instructions with no Thumb analogue at all. */
23944 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
23945 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
23946
23947 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
23948 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
23949 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
23950 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
23951 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
23952 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
23953 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
23954 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
23955
23956 #undef ARM_VARIANT
23957 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
23958 #undef THUMB_VARIANT
23959 #define THUMB_VARIANT & arm_ext_v4t
23960
23961 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
23962 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
23963
23964 #undef THUMB_VARIANT
23965 #define THUMB_VARIANT & arm_ext_v6t2
23966
23967 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
23968 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
23969
23970 /* Generic coprocessor instructions. */
23971 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
23972 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23973 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23974 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23975 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
23976 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
23977 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
23978
23979 #undef ARM_VARIANT
23980 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
23981
23982 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
23983 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
23984
23985 #undef ARM_VARIANT
23986 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
23987 #undef THUMB_VARIANT
23988 #define THUMB_VARIANT & arm_ext_msr
23989
23990 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
23991 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
23992
23993 #undef ARM_VARIANT
23994 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
23995 #undef THUMB_VARIANT
23996 #define THUMB_VARIANT & arm_ext_v6t2
23997
23998 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
23999 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24000 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24001 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24002 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24003 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24004 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24005 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24006
24007 #undef ARM_VARIANT
24008 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
24009 #undef THUMB_VARIANT
24010 #define THUMB_VARIANT & arm_ext_v4t
24011
24012 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24013 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24014 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24015 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24016 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24017 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24018
24019 #undef ARM_VARIANT
24020 #define ARM_VARIANT & arm_ext_v4t_5
24021
24022 /* ARM Architecture 4T. */
24023 /* Note: bx (and blx) are required on V5, even if the processor does
24024 not support Thumb. */
24025 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
24026
24027 #undef ARM_VARIANT
24028 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
24029 #undef THUMB_VARIANT
24030 #define THUMB_VARIANT & arm_ext_v5t
24031
24032 /* Note: blx has 2 variants; the .value coded here is for
24033 BLX(2). Only this variant has conditional execution. */
24034 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
24035 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
24036
24037 #undef THUMB_VARIANT
24038 #define THUMB_VARIANT & arm_ext_v6t2
24039
24040 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
24041 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24042 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24043 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24044 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24045 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
24046 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24047 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24048
24049 #undef ARM_VARIANT
24050 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
24051 #undef THUMB_VARIANT
24052 #define THUMB_VARIANT & arm_ext_v5exp
24053
24054 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24055 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24056 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24057 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24058
24059 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24060 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24061
24062 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24063 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24064 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24065 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24066
24067 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24068 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24069 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24070 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24071
24072 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24073 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24074
24075 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24076 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24077 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24078 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24079
24080 #undef ARM_VARIANT
24081 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
24082 #undef THUMB_VARIANT
24083 #define THUMB_VARIANT & arm_ext_v6t2
24084
24085 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
24086 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
24087 ldrd, t_ldstd),
24088 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
24089 ADDRGLDRS), ldrd, t_ldstd),
24090
24091 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24092 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24093
24094 #undef ARM_VARIANT
24095 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
24096
24097 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
24098
24099 #undef ARM_VARIANT
24100 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
24101 #undef THUMB_VARIANT
24102 #define THUMB_VARIANT & arm_ext_v6
24103
24104 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
24105 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
24106 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24107 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24108 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24109 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24110 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24111 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24112 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24113 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
24114
24115 #undef THUMB_VARIANT
24116 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24117
24118 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
24119 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24120 strex, t_strex),
24121 #undef THUMB_VARIANT
24122 #define THUMB_VARIANT & arm_ext_v6t2
24123
24124 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24125 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24126
24127 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
24128 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
24129
24130 /* ARM V6 not included in V7M. */
24131 #undef THUMB_VARIANT
24132 #define THUMB_VARIANT & arm_ext_v6_notm
24133 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24134 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24135 UF(rfeib, 9900a00, 1, (RRw), rfe),
24136 UF(rfeda, 8100a00, 1, (RRw), rfe),
24137 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24138 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24139 UF(rfefa, 8100a00, 1, (RRw), rfe),
24140 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24141 UF(rfeed, 9900a00, 1, (RRw), rfe),
24142 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24143 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24144 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24145 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
24146 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
24147 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
24148 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
24149 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24150 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24151 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
24152
24153 /* ARM V6 not included in V7M (eg. integer SIMD). */
24154 #undef THUMB_VARIANT
24155 #define THUMB_VARIANT & arm_ext_v6_dsp
24156 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
24157 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
24158 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24159 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24160 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24161 /* Old name for QASX. */
24162 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24163 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24164 /* Old name for QSAX. */
24165 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24166 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24167 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24168 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24169 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24170 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24171 /* Old name for SASX. */
24172 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24173 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24174 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24175 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24176 /* Old name for SHASX. */
24177 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24178 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24179 /* Old name for SHSAX. */
24180 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24181 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24182 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24183 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24184 /* Old name for SSAX. */
24185 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24186 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24187 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24188 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24189 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24190 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24191 /* Old name for UASX. */
24192 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24193 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24194 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24195 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24196 /* Old name for UHASX. */
24197 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24198 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24199 /* Old name for UHSAX. */
24200 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24201 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24202 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24203 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24204 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24205 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24206 /* Old name for UQASX. */
24207 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24208 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24209 /* Old name for UQSAX. */
24210 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24211 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24212 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24213 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24214 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24215 /* Old name for USAX. */
24216 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24217 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24218 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24219 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24220 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24221 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24222 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24223 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24224 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24225 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24226 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24227 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24228 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24229 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24230 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24231 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24232 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24233 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24234 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24235 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24236 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24237 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24238 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24239 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24240 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24241 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24242 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24243 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24244 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24245 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
24246 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
24247 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24248 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24249 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
24250
24251 #undef ARM_VARIANT
24252 #define ARM_VARIANT & arm_ext_v6k_v6t2
24253 #undef THUMB_VARIANT
24254 #define THUMB_VARIANT & arm_ext_v6k_v6t2
24255
24256 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
24257 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
24258 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
24259 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
24260
24261 #undef THUMB_VARIANT
24262 #define THUMB_VARIANT & arm_ext_v6_notm
24263 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24264 ldrexd, t_ldrexd),
24265 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24266 RRnpcb), strexd, t_strexd),
24267
24268 #undef THUMB_VARIANT
24269 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24270 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24271 rd_rn, rd_rn),
24272 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24273 rd_rn, rd_rn),
24274 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24275 strex, t_strexbh),
24276 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24277 strex, t_strexbh),
24278 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
24279
24280 #undef ARM_VARIANT
24281 #define ARM_VARIANT & arm_ext_sec
24282 #undef THUMB_VARIANT
24283 #define THUMB_VARIANT & arm_ext_sec
24284
24285 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
24286
24287 #undef ARM_VARIANT
24288 #define ARM_VARIANT & arm_ext_virt
24289 #undef THUMB_VARIANT
24290 #define THUMB_VARIANT & arm_ext_virt
24291
24292 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24293 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
24294
24295 #undef ARM_VARIANT
24296 #define ARM_VARIANT & arm_ext_pan
24297 #undef THUMB_VARIANT
24298 #define THUMB_VARIANT & arm_ext_pan
24299
24300 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
24301
24302 #undef ARM_VARIANT
24303 #define ARM_VARIANT & arm_ext_v6t2
24304 #undef THUMB_VARIANT
24305 #define THUMB_VARIANT & arm_ext_v6t2
24306
24307 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
24308 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24309 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24310 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24311
24312 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24313 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
24314
24315 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24316 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24317 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24318 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24319
24320 #undef ARM_VARIANT
24321 #define ARM_VARIANT & arm_ext_v3
24322 #undef THUMB_VARIANT
24323 #define THUMB_VARIANT & arm_ext_v6t2
24324
24325 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
24326 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24327 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24328
24329 #undef ARM_VARIANT
24330 #define ARM_VARIANT & arm_ext_v6t2
24331 #undef THUMB_VARIANT
24332 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24333 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
24334 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
24335
24336 /* Thumb-only instructions. */
24337 #undef ARM_VARIANT
24338 #define ARM_VARIANT NULL
24339 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
24340 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
24341
24342 /* ARM does not really have an IT instruction, so always allow it.
24343 The opcode is copied from Thumb in order to allow warnings in
24344 -mimplicit-it=[never | arm] modes. */
24345 #undef ARM_VARIANT
24346 #define ARM_VARIANT & arm_ext_v1
24347 #undef THUMB_VARIANT
24348 #define THUMB_VARIANT & arm_ext_v6t2
24349
24350 TUE("it", bf08, bf08, 1, (COND), it, t_it),
24351 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
24352 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
24353 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
24354 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
24355 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
24356 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
24357 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
24358 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
24359 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
24360 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
24361 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
24362 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
24363 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
24364 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
24365 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
24366 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24367 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24368
24369 /* Thumb2 only instructions. */
24370 #undef ARM_VARIANT
24371 #define ARM_VARIANT NULL
24372
24373 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24374 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24375 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
24376 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
24377 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
24378 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
24379
24380 /* Hardware division instructions. */
24381 #undef ARM_VARIANT
24382 #define ARM_VARIANT & arm_ext_adiv
24383 #undef THUMB_VARIANT
24384 #define THUMB_VARIANT & arm_ext_div
24385
24386 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24387 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24388
24389 /* ARM V6M/V7 instructions. */
24390 #undef ARM_VARIANT
24391 #define ARM_VARIANT & arm_ext_barrier
24392 #undef THUMB_VARIANT
24393 #define THUMB_VARIANT & arm_ext_barrier
24394
24395 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24396 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24397 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24398
24399 /* ARM V7 instructions. */
24400 #undef ARM_VARIANT
24401 #define ARM_VARIANT & arm_ext_v7
24402 #undef THUMB_VARIANT
24403 #define THUMB_VARIANT & arm_ext_v7
24404
24405 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
24406 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
24407
24408 #undef ARM_VARIANT
24409 #define ARM_VARIANT & arm_ext_mp
24410 #undef THUMB_VARIANT
24411 #define THUMB_VARIANT & arm_ext_mp
24412
24413 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
24414
24415 /* ARMv8 instructions. */
24416 #undef ARM_VARIANT
24417 #define ARM_VARIANT & arm_ext_v8
24418
24419 /* Instructions shared between armv8-a and armv8-m. */
24420 #undef THUMB_VARIANT
24421 #define THUMB_VARIANT & arm_ext_atomics
24422
24423 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24424 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24425 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24426 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24427 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24428 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
24429 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24430 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
24431 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
24432 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
24433 stlex, t_stlex),
24434 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
24435 stlex, t_stlex),
24436 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
24437 stlex, t_stlex),
24438 #undef THUMB_VARIANT
24439 #define THUMB_VARIANT & arm_ext_v8
24440
24441 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
24442 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
24443 ldrexd, t_ldrexd),
24444 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
24445 strexd, t_strexd),
24446
24447 /* Defined in V8 but is in undefined encoding space for earlier
24448 architectures. However earlier architectures are required to treat
24449 this instruction as a semihosting trap as well. Hence while not explicitly
24450 defined as such, it is in fact correct to define the instruction for all
24451 architectures. */
24452 #undef THUMB_VARIANT
24453 #define THUMB_VARIANT & arm_ext_v1
24454 #undef ARM_VARIANT
24455 #define ARM_VARIANT & arm_ext_v1
24456 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
24457
24458 /* ARMv8 T32 only. */
24459 #undef ARM_VARIANT
24460 #define ARM_VARIANT NULL
24461 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
24462 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
24463 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
24464
24465 /* FP for ARMv8. */
24466 #undef ARM_VARIANT
24467 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
24468 #undef THUMB_VARIANT
24469 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
24470
24471 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
24472 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
24473 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
24474 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
24475 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
24476 mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintz),
24477 mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintx),
24478 mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrinta),
24479 mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintn),
24480 mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintp),
24481 mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintm),
24482
24483 /* Crypto v1 extensions. */
24484 #undef ARM_VARIANT
24485 #define ARM_VARIANT & fpu_crypto_ext_armv8
24486 #undef THUMB_VARIANT
24487 #define THUMB_VARIANT & fpu_crypto_ext_armv8
24488
24489 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
24490 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
24491 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
24492 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
24493 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
24494 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
24495 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
24496 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
24497 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
24498 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
24499 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
24500 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
24501 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
24502 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
24503
24504 #undef ARM_VARIANT
24505 #define ARM_VARIANT & crc_ext_armv8
24506 #undef THUMB_VARIANT
24507 #define THUMB_VARIANT & crc_ext_armv8
24508 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
24509 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
24510 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
24511 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
24512 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
24513 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
24514
24515 /* ARMv8.2 RAS extension. */
24516 #undef ARM_VARIANT
24517 #define ARM_VARIANT & arm_ext_ras
24518 #undef THUMB_VARIANT
24519 #define THUMB_VARIANT & arm_ext_ras
24520 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
24521
24522 #undef ARM_VARIANT
24523 #define ARM_VARIANT & arm_ext_v8_3
24524 #undef THUMB_VARIANT
24525 #define THUMB_VARIANT & arm_ext_v8_3
24526 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
24527
24528 #undef ARM_VARIANT
24529 #define ARM_VARIANT & fpu_neon_ext_dotprod
24530 #undef THUMB_VARIANT
24531 #define THUMB_VARIANT & fpu_neon_ext_dotprod
24532 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
24533 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
24534
24535 #undef ARM_VARIANT
24536 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
24537 #undef THUMB_VARIANT
24538 #define THUMB_VARIANT NULL
24539
24540 cCE("wfs", e200110, 1, (RR), rd),
24541 cCE("rfs", e300110, 1, (RR), rd),
24542 cCE("wfc", e400110, 1, (RR), rd),
24543 cCE("rfc", e500110, 1, (RR), rd),
24544
24545 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
24546 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
24547 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
24548 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
24549
24550 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
24551 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
24552 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
24553 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
24554
24555 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
24556 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
24557 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
24558 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
24559 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
24560 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
24561 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
24562 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
24563 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
24564 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
24565 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
24566 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
24567
24568 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
24569 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
24570 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
24571 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
24572 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
24573 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
24574 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
24575 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
24576 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
24577 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
24578 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
24579 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
24580
24581 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
24582 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
24583 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
24584 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
24585 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
24586 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
24587 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
24588 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
24589 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
24590 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
24591 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
24592 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
24593
24594 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
24595 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
24596 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
24597 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
24598 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
24599 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
24600 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
24601 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
24602 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
24603 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
24604 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
24605 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
24606
24607 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
24608 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
24609 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
24610 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
24611 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
24612 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
24613 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
24614 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
24615 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
24616 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
24617 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
24618 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
24619
24620 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
24621 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
24622 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
24623 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
24624 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
24625 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
24626 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
24627 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
24628 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
24629 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
24630 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
24631 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
24632
24633 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
24634 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
24635 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
24636 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
24637 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
24638 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
24639 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
24640 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
24641 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
24642 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
24643 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
24644 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
24645
24646 /* FPA EXP (exponent-to-base-e) — one entry per precision (s/d/e) and
   rounding suffix (none/p/m/z), like the other FPA monadic groups above. */
24646 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
24647 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
24648 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
24649 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
24650 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
24651 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
24652 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
24653 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
24654 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
24655 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
24656 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
24657 cCL("expez", e788160, 2, (RF, RF_IF), rd_rm), /* Was a duplicate
   "expdz"; e788160 is the extended-precision round-toward-zero variant,
   matching logez/lgnez/sinez in the neighbouring groups. */
24658
24659 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
24660 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
24661 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
24662 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
24663 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
24664 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
24665 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
24666 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
24667 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
24668 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
24669 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
24670 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
24671
24672 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
24673 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
24674 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
24675 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
24676 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
24677 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
24678 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
24679 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
24680 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
24681 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
24682 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
24683 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
24684
24685 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
24686 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
24687 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
24688 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
24689 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
24690 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
24691 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
24692 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
24693 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
24694 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
24695 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
24696 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
24697
24698 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
24699 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
24700 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
24701 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
24702 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
24703 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
24704 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
24705 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
24706 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
24707 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
24708 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
24709 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
24710
24711 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
24712 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
24713 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
24714 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
24715 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
24716 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
24717 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
24718 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
24719 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
24720 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
24721 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
24722 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
24723
24724 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
24725 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
24726 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
24727 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
24728 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
24729 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
24730 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
24731 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
24732 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
24733 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
24734 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
24735 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
24736
24737 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
24738 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
24739 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
24740 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
24741 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
24742 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
24743 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
24744 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
24745 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
24746 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
24747 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
24748 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
24749
24750 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
24751 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
24752 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
24753 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
24754 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
24755 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
24756 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
24757 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
24758 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
24759 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
24760 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
24761 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
24762
24763 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
24764 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
24765 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
24766 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
24767 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
24768 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24769 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24770 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24771 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
24772 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
24773 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
24774 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
24775
24776 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
24777 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
24778 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
24779 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
24780 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
24781 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24782 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24783 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24784 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
24785 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
24786 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
24787 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
24788
24789 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
24790 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
24791 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
24792 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
24793 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
24794 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24795 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24796 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24797 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
24798 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
24799 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
24800 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
24801
24802 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
24803 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
24804 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
24805 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
24806 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
24807 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24808 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24809 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24810 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
24811 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
24812 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
24813 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
24814
24815 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
24816 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
24817 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
24818 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
24819 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
24820 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24821 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24822 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24823 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
24824 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
24825 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
24826 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
24827
24828 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
24829 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
24830 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
24831 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
24832 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
24833 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24834 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24835 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24836 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
24837 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
24838 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
24839 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
24840
24841 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
24842 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
24843 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
24844 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
24845 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
24846 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24847 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24848 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24849 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
24850 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
24851 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
24852 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
24853
24854 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
24855 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
24856 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
24857 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
24858 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
24859 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24860 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24861 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24862 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
24863 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
24864 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
24865 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
24866
24867 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
24868 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
24869 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
24870 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
24871 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
24872 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24873 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24874 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24875 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
24876 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
24877 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
24878 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
24879
24880 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
24881 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
24882 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
24883 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
24884 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
24885 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24886 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24887 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24888 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
24889 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
24890 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
24891 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
24892
24893 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24894 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24895 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24896 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24897 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24898 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24899 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24900 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24901 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24902 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24903 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24904 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24905
24906 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24907 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24908 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24909 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24910 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24911 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24912 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24913 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24914 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24915 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24916 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24917 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24918
24919 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
24920 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
24921 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
24922 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
24923 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
24924 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
24925 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
24926 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
24927 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
24928 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
24929 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
24930 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
24931
24932 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
24933 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
24934 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
24935 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
24936
24937 cCL("flts", e000110, 2, (RF, RR), rn_rd),
24938 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
24939 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
24940 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
24941 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
24942 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
24943 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
24944 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
24945 cCL("flte", e080110, 2, (RF, RR), rn_rd),
24946 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
24947 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
24948 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
24949
24950 /* The implementation of the FIX instruction is broken on some
24951 assemblers, in that it accepts a precision specifier as well as a
24952 rounding specifier, despite the fact that this is meaningless.
24953 To be more compatible, we accept it as well, though of course it
24954 does not set any bits. */
24955 cCE("fix", e100110, 2, (RR, RF), rd_rm),
24956 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
24957 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
24958 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
24959 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
24960 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
24961 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
24962 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
24963 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
24964 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
24965 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
24966 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
24967 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
24968
24969 /* Instructions that were new with the real FPA, call them V2. */
24970 #undef ARM_VARIANT
24971 #define ARM_VARIANT & fpu_fpa_ext_v2
24972
24973 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24974 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24975 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24976 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24977 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24978 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
24979
24980 #undef ARM_VARIANT
24981 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
24982 #undef THUMB_VARIANT
24983 #define THUMB_VARIANT & arm_ext_v6t2
24984 mcCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs),
24985 mcCE(vmsr, ee00a10, 2, (RVC, RR), vmsr),
24986 mcCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
24987 mcCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
24988 mcCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
24989 mcCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
24990 #undef THUMB_VARIANT
24991
24992 /* Moves and type conversions. */
24993 cCE("fmstat", ef1fa10, 0, (), noargs),
24994 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
24995 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
24996 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
24997 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
24998 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
24999 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
25000 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
25001 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
25002
25003 /* Memory operations. */
25004 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25005 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25006 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25007 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25008 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25009 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25010 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25011 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25012 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25013 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25014 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25015 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25016 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25017 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25018 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25019 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25020
25021 /* Monadic operations. */
25022 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
25023 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
25024 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
25025
25026 /* Dyadic operations. */
25027 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25028 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25029 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25030 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25031 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25032 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25033 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25034 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25035 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25036
25037 /* Comparisons. */
25038 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
25039 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
25040 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
25041 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
25042
25043 /* Double precision load/store are still present on single precision
25044 implementations. */
25045 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25046 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25047 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25048 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25049 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25050 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25051 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25052 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25053
25054 #undef ARM_VARIANT
25055 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
25056
25057 /* Moves and type conversions. */
25058 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25059 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25060 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
25061 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
25062 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
25063 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
25064 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25065 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
25066 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25067 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25068 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25069 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25070
25071 /* Monadic operations. */
25072 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25073 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25074 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25075
25076 /* Dyadic operations. */
25077 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25078 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25079 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25080 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25081 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25082 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25083 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25084 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25085 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25086
25087 /* Comparisons. */
25088 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25089 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
25090 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25091 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
25092
25093 /* Instructions which may belong to either the Neon or VFP instruction sets.
25094 Individual encoder functions perform additional architecture checks. */
25095 #undef ARM_VARIANT
25096 #define ARM_VARIANT & fpu_vfp_ext_v1xd
25097 #undef THUMB_VARIANT
25098 #define THUMB_VARIANT & arm_ext_v6t2
25099
25100 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25101 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25102 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25103 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25104 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25105 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25106
25107 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
25108 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
25109
25110 #undef THUMB_VARIANT
25111 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
25112
25113 /* These mnemonics are unique to VFP. */
25114 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
25115 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25116 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25117 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25118 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25119 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
25120
25121 /* Mnemonics shared by Neon and VFP. */
25122 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25123
25124 mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25125 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
25126 MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25127 MNCEF(vcvtt, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25128
25129
25130 /* NOTE: All VMOV encoding is special-cased! */
25131 NCE(vmovq, 0, 1, (VMOV), neon_mov),
25132
25133 #undef THUMB_VARIANT
25134 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25135 by different feature bits. Since we are setting the Thumb guard, we can
25136 require Thumb-1 which makes it a nop guard and set the right feature bit in
25137 do_vldr_vstr (). */
25138 #define THUMB_VARIANT & arm_ext_v4t
25139 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25140 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25141
25142 #undef ARM_VARIANT
25143 #define ARM_VARIANT & arm_ext_fp16
25144 #undef THUMB_VARIANT
25145 #define THUMB_VARIANT & arm_ext_fp16
25146 /* New instructions added from v8.2, allowing the extraction and insertion of
25147 the upper 16 bits of a 32-bit vector register. */
25148 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
25149 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
25150
25151 /* New backported fma/fms instructions optional in v8.2. */
25152 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25153 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25154
25155 #undef THUMB_VARIANT
25156 #define THUMB_VARIANT & fpu_neon_ext_v1
25157 #undef ARM_VARIANT
25158 #define ARM_VARIANT & fpu_neon_ext_v1
25159
25160 /* Data processing with three registers of the same length. */
25161 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
25162 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
25163 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
25164 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25165 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25166 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25167 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
25168 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25169 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25170 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25171 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25172 /* If not immediate, fall back to neon_dyadic_i64_su.
25173 shl should accept I8 I16 I32 I64,
25174 qshl should accept S8 S16 S32 S64 U8 U16 U32 U64. */
25175 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl),
25176 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl),
25177 /* Logic ops, types optional & ignored. */
25178 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25179 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25180 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25181 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25182 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
25183 /* Bitfield ops, untyped. */
25184 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25185 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25186 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25187 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25188 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25189 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25190 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
25191 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25192 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25193 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25194 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25195 back to neon_dyadic_if_su. */
25196 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25197 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25198 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25199 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25200 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25201 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25202 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25203 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25204 /* Comparison. Type I8 I16 I32 F32. */
25205 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25206 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
25207 /* As above, D registers only. */
25208 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25209 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25210 /* Int and float variants, signedness unimportant. */
25211 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25212 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25213 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
25214 /* Add/sub take types I8 I16 I32 I64 F32. */
25215 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25216 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25217 /* vtst takes sizes 8, 16, 32. */
25218 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25219 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
25220 /* VMUL takes I8 I16 I32 F32 P8. */
25221 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
25222 /* VQD{R}MULH takes S16 S32. */
25223 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25224 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25225 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25226 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25227 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25228 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25229 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25230 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25231 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25232 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25233 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25234 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25235 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25236 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25237 /* ARM v8.1 extension. */
25238 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25239 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25240 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25241
25242 /* Two address, int/float. Types S8 S16 S32 F32. */
25243 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
25244 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
25245
25246 /* Data processing with two registers and a shift amount. */
25247 /* Right shifts, and variants with rounding.
25248 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
25249 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25250 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25251 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25252 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25253 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25254 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25255 /* Shift and insert. Sizes accepted 8 16 32 64. */
25256 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
25257 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
25258 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
25259 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
25260 /* Right shift immediate, saturating & narrowing, with rounding variants.
25261 Types accepted S16 S32 S64 U16 U32 U64. */
25262 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25263 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25264 /* As above, unsigned. Types accepted S16 S32 S64. */
25265 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25266 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25267 /* Right shift narrowing. Types accepted I16 I32 I64. */
25268 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25269 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25270 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
25271 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
25272 /* CVT with optional immediate for fixed-point variant. */
25273 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
25274
25275 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
25276
25277 /* Data processing, three registers of different lengths. */
25278 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
25279 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
25280 /* If not scalar, fall back to neon_dyadic_long.
25281 Vector types as above, scalar types S16 S32 U16 U32. */
25282 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25283 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25284 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
25285 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25286 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25287 /* Dyadic, narrowing insns. Types I16 I32 I64. */
25288 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25289 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25290 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25291 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25292 /* Saturating doubling multiplies. Types S16 S32. */
25293 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25294 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25295 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25296 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25297 S16 S32 U16 U32. */
25298 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
25299
25300 /* Extract. Size 8. */
25301 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25302 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
25303
25304 /* Two registers, miscellaneous. */
25305 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
25306 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
25307 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
25308 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
25309 /* Vector replicate. Sizes 8 16 32. */
25310 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
25311 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
25312 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
25313 /* VMOVN. Types I16 I32 I64. */
25314 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
25315 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
25316 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
25317 /* VQMOVUN. Types S16 S32 S64. */
25318 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
25319 /* VZIP / VUZP. Sizes 8 16 32. */
25320 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
25321 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
25322 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
25323 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
25324 /* VQABS / VQNEG. Types S8 S16 S32. */
25325 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
25326 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
25327 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
25328 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
25329 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
25330 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
25331 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
25332 /* Reciprocal estimates. Types U32 F16 F32. */
25333 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
25334 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
25335 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
25336 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
25337 /* VCLS. Types S8 S16 S32. */
25338 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
25339 /* VCLZ. Types I8 I16 I32. */
25340 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
25341 /* VCNT. Size 8. */
25342 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
25343 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
25344 /* Two address, untyped. */
25345 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
25346 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
25347 /* VTRN. Sizes 8 16 32. */
25348 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
25349 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
25350
25351 /* Table lookup. Size 8. */
25352 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25353 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25354
25355 #undef THUMB_VARIANT
25356 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
25357 #undef ARM_VARIANT
25358 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
25359
25360 /* Neon element/structure load/store. */
25361 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25362 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25363 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25364 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25365 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25366 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25367 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25368 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25369
25370 #undef THUMB_VARIANT
25371 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
25372 #undef ARM_VARIANT
25373 #define ARM_VARIANT & fpu_vfp_ext_v3xd
25374 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
25375 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25376 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25377 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25378 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25379 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25380 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25381 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25382 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25383
25384 #undef THUMB_VARIANT
25385 #define THUMB_VARIANT & fpu_vfp_ext_v3
25386 #undef ARM_VARIANT
25387 #define ARM_VARIANT & fpu_vfp_ext_v3
25388
25389 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
25390 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25391 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25392 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25393 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25394 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25395 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25396 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25397 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25398
25399 #undef ARM_VARIANT
25400 #define ARM_VARIANT & fpu_vfp_ext_fma
25401 #undef THUMB_VARIANT
25402 #define THUMB_VARIANT & fpu_vfp_ext_fma
25403 /* Mnemonics shared by Neon, VFP, MVE and BF16. These are included in the
25404 VFP FMA variant; NEON and VFP FMA always includes the NEON
25405 FMA instructions. */
25406 mnCEF(vfma, _vfma, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25407 TUF ("vfmat", c300850, fc300850, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25408 mnCEF(vfms, _vfms, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), neon_fmac),
25409
25410 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
25411 the v form should always be used. */
25412 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25413 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25414 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25415 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25416 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25417 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25418
25419 #undef THUMB_VARIANT
25420 #undef ARM_VARIANT
25421 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
25422
25423 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25424 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25425 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25426 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25427 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25428 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25429 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25430 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25431
25432 #undef ARM_VARIANT
25433 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
25434
25435 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
25436 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
25437 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
25438 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
25439 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
25440 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
25441 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
25442 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
25443 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
25444 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25445 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25446 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
25447 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25448 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25449 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
25450 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25451 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25452 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
25453 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
25454 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
25455 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25456 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25457 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25458 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25459 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25460 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
25461 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
25462 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
25463 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
25464 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
25465 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
25466 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
25467 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
25468 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
25469 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
25470 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
25471 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
25472 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25473 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25474 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25475 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25476 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25477 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25478 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25479 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25480 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25481 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
25482 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25483 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25484 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25485 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25486 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25487 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25488 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25489 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25490 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25491 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25492 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25493 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25494 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25495 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25496 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25497 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25498 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25499 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25500 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25501 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25502 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25503 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
25504 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
25505 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25506 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25507 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25508 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25509 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25510 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25511 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25512 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25513 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25514 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25515 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25516 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25517 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25518 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25519 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25520 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25521 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25522 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25523 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
25524 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25525 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25526 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25527 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25528 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25529 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25530 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25531 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25532 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25533 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25534 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25535 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25536 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25537 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25538 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25539 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25540 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25541 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25542 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25543 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25544 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25545 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
25546 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25547 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25548 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25549 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25550 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25551 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25552 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25553 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25554 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25555 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25556 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25557 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25558 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25559 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25560 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25561 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25562 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
25563 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
25564 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25565 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
25566 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
25567 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
25568 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25569 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25570 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25571 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25572 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25573 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25574 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25575 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25576 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25577 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
25578 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
25579 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
25580 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
25581 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
25582 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
25583 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25584 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25585 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25586 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
25587 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
25588 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
25589 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
25590 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
25591 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
25592 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25593 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25594 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25595 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25596 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
25597
25598 #undef ARM_VARIANT
25599 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
25600
25601 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
25602 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
25603 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
25604 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
25605 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
25606 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
25607 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25608 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25609 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25610 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25611 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25612 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25613 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25614 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25615 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25616 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25617 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25618 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25619 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25620 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25621 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
25622 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25623 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25624 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25625 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25626 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25627 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25628 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25629 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25630 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25631 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25632 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25633 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25634 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25635 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25636 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25637 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25638 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25639 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25640 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25641 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25642 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25643 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25644 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25645 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25646 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25647 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25648 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25649 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25650 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25651 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25652 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25653 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25654 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25655 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25656 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25657 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
25658
25659 #undef ARM_VARIANT
25660 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
25661
25662 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
25663 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
25664 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
25665 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
25666 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
25667 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
25668 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
25669 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
25670 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
25671 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
25672 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
25673 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
25674 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
25675 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
25676 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
25677 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
25678 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
25679 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
25680 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
25681 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
25682 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
25683 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
25684 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
25685 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
25686 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
25687 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
25688 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
25689 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
25690 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
25691 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
25692 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
25693 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
25694 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
25695 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
25696 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
25697 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
25698 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
25699 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
25700 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
25701 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
25702 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
25703 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
25704 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
25705 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
25706 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
25707 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
25708 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
25709 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
25710 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
25711 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
25712 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
25713 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
25714 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
25715 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
25716 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
25717 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
25718 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
25719 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
25720 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
25721 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
25722 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
25723 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
25724 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
25725 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
25726 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25727 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25728 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25729 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25730 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25731 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
25732 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25733 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
25734 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25735 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
25736 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25737 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
25738
25739 /* ARMv8.5-A instructions. */
25740 #undef ARM_VARIANT
25741 #define ARM_VARIANT & arm_ext_sb
25742 #undef THUMB_VARIANT
25743 #define THUMB_VARIANT & arm_ext_sb
25744 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
25745
25746 #undef ARM_VARIANT
25747 #define ARM_VARIANT & arm_ext_predres
25748 #undef THUMB_VARIANT
25749 #define THUMB_VARIANT & arm_ext_predres
25750 CE("cfprctx", e070f93, 1, (RRnpc), rd),
25751 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
25752 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
25753
25754 /* ARMv8-M instructions. */
25755 #undef ARM_VARIANT
25756 #define ARM_VARIANT NULL
25757 #undef THUMB_VARIANT
25758 #define THUMB_VARIANT & arm_ext_v8m
25759 ToU("sg", e97fe97f, 0, (), noargs),
25760 ToC("blxns", 4784, 1, (RRnpc), t_blx),
25761 ToC("bxns", 4704, 1, (RRnpc), t_bx),
25762 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
25763 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
25764 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
25765 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
25766
25767 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
25768 instructions behave as nop if no VFP is present. */
25769 #undef THUMB_VARIANT
25770 #define THUMB_VARIANT & arm_ext_v8m_main
25771 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
25772 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
25773
25774 /* Armv8.1-M Mainline instructions. */
25775 #undef THUMB_VARIANT
25776 #define THUMB_VARIANT & arm_ext_v8_1m_main
25777 toU("cinc", _cinc, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25778 toU("cinv", _cinv, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25779 toU("cneg", _cneg, 3, (RRnpcsp, RR_ZR, COND), t_cond),
25780 toU("csel", _csel, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25781 toU("csetm", _csetm, 2, (RRnpcsp, COND), t_cond),
25782 toU("cset", _cset, 2, (RRnpcsp, COND), t_cond),
25783 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25784 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25785 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
25786
25787 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
25788 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
25789 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
25790 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
25791 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
25792
25793 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
25794 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
25795 toU("le", _le, 2, (oLR, EXP), t_loloop),
25796
25797 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
25798 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
25799
25800 #undef THUMB_VARIANT
25801 #define THUMB_VARIANT & mve_ext
25802 ToC("lsll", ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25803 ToC("lsrl", ea50011f, 3, (RRe, RRo, I32), mve_scalar_shift),
25804 ToC("asrl", ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
25805 ToC("uqrshll", ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25806 ToC("sqrshrl", ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
25807 ToC("uqshll", ea51010f, 3, (RRe, RRo, I32), mve_scalar_shift),
25808 ToC("urshrl", ea51011f, 3, (RRe, RRo, I32), mve_scalar_shift),
25809 ToC("srshrl", ea51012f, 3, (RRe, RRo, I32), mve_scalar_shift),
25810 ToC("sqshll", ea51013f, 3, (RRe, RRo, I32), mve_scalar_shift),
25811 ToC("uqrshl", ea500f0d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
25812 ToC("sqrshr", ea500f2d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
25813 ToC("uqshl", ea500f0f, 2, (RRnpcsp, I32), mve_scalar_shift),
25814 ToC("urshr", ea500f1f, 2, (RRnpcsp, I32), mve_scalar_shift),
25815 ToC("srshr", ea500f2f, 2, (RRnpcsp, I32), mve_scalar_shift),
25816 ToC("sqshl", ea500f3f, 2, (RRnpcsp, I32), mve_scalar_shift),
25817
25818 ToC("vpt", ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25819 ToC("vptt", ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25820 ToC("vpte", ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25821 ToC("vpttt", ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25822 ToC("vptte", ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25823 ToC("vptet", ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25824 ToC("vptee", ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25825 ToC("vptttt", ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25826 ToC("vpttte", ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25827 ToC("vpttet", ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25828 ToC("vpttee", ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25829 ToC("vptett", ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25830 ToC("vptete", ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25831 ToC("vpteet", ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25832 ToC("vpteee", ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
25833
25834 ToC("vpst", fe710f4d, 0, (), mve_vpt),
25835 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
25836 ToC("vpste", fe718f4d, 0, (), mve_vpt),
25837 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
25838 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
25839 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
25840 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
25841 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
25842 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
25843 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
25844 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
25845 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
25846 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
25847 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
25848 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
25849
25850 /* MVE and MVE FP only. */
25851 mToC("vhcadd", ee000f00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vhcadd),
25852 mCEF(vctp, _vctp, 1, (RRnpc), mve_vctp),
25853 mCEF(vadc, _vadc, 3, (RMQ, RMQ, RMQ), mve_vadc),
25854 mCEF(vadci, _vadci, 3, (RMQ, RMQ, RMQ), mve_vadc),
25855 mToC("vsbc", fe300f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
25856 mToC("vsbci", fe301f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
25857 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
25858 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
25859 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25860 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25861 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
25862 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
25863 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25864 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25865 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
25866 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
25867 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
25868 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
25869
25870 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25871 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25872 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25873 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25874 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25875 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25876 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25877 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
25878 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25879 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25880 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25881 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
25882 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25883 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25884 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25885 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25886 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25887 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25888 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25889 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
25890
25891 mCEF(vmovnt, _vmovnt, 2, (RMQ, RMQ), mve_movn),
25892 mCEF(vmovnb, _vmovnb, 2, (RMQ, RMQ), mve_movn),
25893 mCEF(vbrsr, _vbrsr, 3, (RMQ, RMQ, RR), mve_vbrsr),
25894 mCEF(vaddlv, _vaddlv, 3, (RRe, RRo, RMQ), mve_vaddlv),
25895 mCEF(vaddlva, _vaddlva, 3, (RRe, RRo, RMQ), mve_vaddlv),
25896 mCEF(vaddv, _vaddv, 2, (RRe, RMQ), mve_vaddv),
25897 mCEF(vaddva, _vaddva, 2, (RRe, RMQ), mve_vaddv),
25898 mCEF(vddup, _vddup, 3, (RMQ, RRe, EXPi), mve_viddup),
25899 mCEF(vdwdup, _vdwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
25900 mCEF(vidup, _vidup, 3, (RMQ, RRe, EXPi), mve_viddup),
25901 mCEF(viwdup, _viwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
25902 mToC("vmaxa", ee330e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
25903 mToC("vmina", ee331e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
25904 mCEF(vmaxv, _vmaxv, 2, (RR, RMQ), mve_vmaxv),
25905 mCEF(vmaxav, _vmaxav, 2, (RR, RMQ), mve_vmaxv),
25906 mCEF(vminv, _vminv, 2, (RR, RMQ), mve_vmaxv),
25907 mCEF(vminav, _vminav, 2, (RR, RMQ), mve_vmaxv),
25908
25909 mCEF(vmlaldav, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25910 mCEF(vmlaldava, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25911 mCEF(vmlaldavx, _vmlaldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25912 mCEF(vmlaldavax, _vmlaldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25913 mCEF(vmlalv, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25914 mCEF(vmlalva, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25915 mCEF(vmlsldav, _vmlsldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25916 mCEF(vmlsldava, _vmlsldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25917 mCEF(vmlsldavx, _vmlsldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25918 mCEF(vmlsldavax, _vmlsldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
25919 mToC("vrmlaldavh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25920 mToC("vrmlaldavha",ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25921 mCEF(vrmlaldavhx, _vrmlaldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25922 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25923 mToC("vrmlalvh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25924 mToC("vrmlalvha", ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25925 mCEF(vrmlsldavh, _vrmlsldavh, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25926 mCEF(vrmlsldavha, _vrmlsldavha, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25927 mCEF(vrmlsldavhx, _vrmlsldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25928 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
25929
25930 mToC("vmlas", ee011e40, 3, (RMQ, RMQ, RR), mve_vmlas),
25931 mToC("vmulh", ee010e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
25932 mToC("vrmulh", ee011e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
25933 mToC("vpnot", fe310f4d, 0, (), mve_vpnot),
25934 mToC("vpsel", fe310f01, 3, (RMQ, RMQ, RMQ), mve_vpsel),
25935
25936 mToC("vqdmladh", ee000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25937 mToC("vqdmladhx", ee001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25938 mToC("vqrdmladh", ee000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25939 mToC("vqrdmladhx",ee001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25940 mToC("vqdmlsdh", fe000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25941 mToC("vqdmlsdhx", fe001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25942 mToC("vqrdmlsdh", fe000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25943 mToC("vqrdmlsdhx",fe001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
25944 mToC("vqdmlah", ee000e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25945 mToC("vqdmlash", ee001e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25946 mToC("vqrdmlash", ee001e40, 3, (RMQ, RMQ, RR), mve_vqdmlah),
25947 mToC("vqdmullt", ee301f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
25948 mToC("vqdmullb", ee300f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
25949 mCEF(vqmovnt, _vqmovnt, 2, (RMQ, RMQ), mve_vqmovn),
25950 mCEF(vqmovnb, _vqmovnb, 2, (RMQ, RMQ), mve_vqmovn),
25951 mCEF(vqmovunt, _vqmovunt, 2, (RMQ, RMQ), mve_vqmovn),
25952 mCEF(vqmovunb, _vqmovunb, 2, (RMQ, RMQ), mve_vqmovn),
25953
25954 mCEF(vshrnt, _vshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25955 mCEF(vshrnb, _vshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25956 mCEF(vrshrnt, _vrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25957 mCEF(vrshrnb, _vrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25958 mCEF(vqshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25959 mCEF(vqshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25960 mCEF(vqshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25961 mCEF(vqshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25962 mCEF(vqrshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25963 mCEF(vqrshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25964 mCEF(vqrshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
25965 mCEF(vqrshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
25966
25967 mToC("vshlc", eea00fc0, 3, (RMQ, RR, I32z), mve_vshlc),
25968 mToC("vshllt", ee201e00, 3, (RMQ, RMQ, I32), mve_vshll),
25969 mToC("vshllb", ee200e00, 3, (RMQ, RMQ, I32), mve_vshll),
25970
25971 toU("dlstp", _dlstp, 2, (LR, RR), t_loloop),
25972 toU("wlstp", _wlstp, 3, (LR, RR, EXP), t_loloop),
25973 toU("letp", _letp, 2, (LR, EXP), t_loloop),
25974 toU("lctp", _lctp, 0, (), t_loloop),
25975
25976 #undef THUMB_VARIANT
25977 #define THUMB_VARIANT & mve_fp_ext
25978 mToC("vcmul", ee300e00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vcmul),
25979 mToC("vfmas", ee311e40, 3, (RMQ, RMQ, RR), mve_vfmas),
25980 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
25981 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
25982 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ), mve_vmaxnmv),
25983 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ), mve_vmaxnmv),
25984 mToC("vminnmv", eeee0f80, 2, (RR, RMQ), mve_vmaxnmv),
25985 mToC("vminnmav",eeec0f80, 2, (RR, RMQ), mve_vmaxnmv),
25986
25987 #undef ARM_VARIANT
25988 #define ARM_VARIANT & fpu_vfp_ext_v1
25989 #undef THUMB_VARIANT
25990 #define THUMB_VARIANT & arm_ext_v6t2
25991 mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
25992 mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
25993
25994 mcCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25995
25996 #undef ARM_VARIANT
25997 #define ARM_VARIANT & fpu_vfp_ext_v1xd
25998
25999 MNCE(vmov, 0, 1, (VMOV), neon_mov),
26000 mcCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
26001 mcCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
26002 mcCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
26003
26004 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
26005 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26006 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26007
26008 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26009 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26010
26011 mCEF(vmovlt, _vmovlt, 1, (VMOV), mve_movl),
26012 mCEF(vmovlb, _vmovlb, 1, (VMOV), mve_movl),
26013
26014 mnCE(vcmp, _vcmp, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26015 mnCE(vcmpe, _vcmpe, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26016
26017 #undef ARM_VARIANT
26018 #define ARM_VARIANT & fpu_vfp_ext_v2
26019
26020 mcCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26021 mcCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26022 mcCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
26023 mcCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
26024
26025 #undef ARM_VARIANT
26026 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
26027 mnUF(vcvta, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvta),
26028 mnUF(vcvtp, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtp),
26029 mnUF(vcvtn, _vcvta, 3, (RNSDQMQ, oRNSDQMQ, oI32z), neon_cvtn),
26030 mnUF(vcvtm, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtm),
26031 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26032 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26033
26034 #undef ARM_VARIANT
26035 #define ARM_VARIANT & fpu_neon_ext_v1
26036 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26037 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
26038 mnUF(vaddl, _vaddl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
26039 mnUF(vsubl, _vsubl, 3, (RNQMQ, RNDMQ, RNDMQR), neon_dyadic_long),
26040 mnUF(vand, _vand, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26041 mnUF(vbic, _vbic, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26042 mnUF(vorr, _vorr, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26043 mnUF(vorn, _vorn, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26044 mnUF(veor, _veor, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_logic),
26045 MNUF(vcls, 1b00400, 2, (RNDQMQ, RNDQMQ), neon_cls),
26046 MNUF(vclz, 1b00480, 2, (RNDQMQ, RNDQMQ), neon_clz),
26047 mnCE(vdup, _vdup, 2, (RNDQMQ, RR_RNSC), neon_dup),
26048 MNUF(vhadd, 00000000, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26049 MNUF(vrhadd, 00000100, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_i_su),
26050 MNUF(vhsub, 00000200, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26051 mnUF(vmin, _vmin, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26052 mnUF(vmax, _vmax, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26053 MNUF(vqadd, 0000010, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26054 MNUF(vqsub, 0000210, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26055 mnUF(vmvn, _vmvn, 2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26056 MNUF(vqabs, 1b00700, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26057 MNUF(vqneg, 1b00780, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26058 mnUF(vqrdmlah, _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26059 mnUF(vqdmulh, _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26060 mnUF(vqrdmulh, _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26061 MNUF(vqrshl, 0000510, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26062 MNUF(vrshl, 0000500, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26063 MNUF(vshr, 0800010, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26064 MNUF(vrshr, 0800210, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26065 MNUF(vsli, 1800510, 3, (RNDQMQ, oRNDQMQ, I63), neon_sli),
26066 MNUF(vsri, 1800410, 3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26067 MNUF(vrev64, 1b00000, 2, (RNDQMQ, RNDQMQ), neon_rev),
26068 MNUF(vrev32, 1b00080, 2, (RNDQMQ, RNDQMQ), neon_rev),
26069 MNUF(vrev16, 1b00100, 2, (RNDQMQ, RNDQMQ), neon_rev),
26070 mnUF(vshl, _vshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26071 mnUF(vqshl, _vqshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26072 MNUF(vqshlu, 1800610, 3, (RNDQMQ, oRNDQMQ, I63), neon_qshlu_imm),
26073
26074 #undef ARM_VARIANT
26075 #define ARM_VARIANT & arm_ext_v8_3
26076 #undef THUMB_VARIANT
26077 #define THUMB_VARIANT & arm_ext_v6t2_v8m
26078 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26079 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26080
26081 #undef ARM_VARIANT
26082 #define ARM_VARIANT &arm_ext_bf16
26083 #undef THUMB_VARIANT
26084 #define THUMB_VARIANT &arm_ext_bf16
26085 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26086 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26087 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26088
26089 #undef ARM_VARIANT
26090 #define ARM_VARIANT &arm_ext_i8mm
26091 #undef THUMB_VARIANT
26092 #define THUMB_VARIANT &arm_ext_i8mm
26093 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26094 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26095 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26096 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26097 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26098 };
26099 #undef ARM_VARIANT
26100 #undef THUMB_VARIANT
26101 #undef TCE
26102 #undef TUE
26103 #undef TUF
26104 #undef TCC
26105 #undef cCE
26106 #undef cCL
26107 #undef C3E
26108 #undef C3
26109 #undef CE
26110 #undef CM
26111 #undef CL
26112 #undef UE
26113 #undef UF
26114 #undef UT
26115 #undef NUF
26116 #undef nUF
26117 #undef NCE
26118 #undef nCE
26119 #undef OPS0
26120 #undef OPS1
26121 #undef OPS2
26122 #undef OPS3
26123 #undef OPS4
26124 #undef OPS5
26125 #undef OPS6
26126 #undef do_0
26127 #undef ToC
26128 #undef toC
26129 #undef ToU
26130 #undef toU
26131 \f
26132 /* MD interface: bits in the object file. */
26133
26134 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26135 for use in the a.out file, and stores them in the array pointed to by buf.
26136 This knows about the endian-ness of the target machine and does
26137 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
26138 2 (short) and 4 (long) Floating numbers are put out as a series of
26139 LITTLENUMS (shorts, here at least). */
26140
26141 void
26142 md_number_to_chars (char * buf, valueT val, int n)
26143 {
26144 if (target_big_endian)
26145 number_to_chars_bigendian (buf, val, n);
26146 else
26147 number_to_chars_littleendian (buf, val, n);
26148 }
26149
26150 static valueT
26151 md_chars_to_number (char * buf, int n)
26152 {
26153 valueT result = 0;
26154 unsigned char * where = (unsigned char *) buf;
26155
26156 if (target_big_endian)
26157 {
26158 while (n--)
26159 {
26160 result <<= 8;
26161 result |= (*where++ & 255);
26162 }
26163 }
26164 else
26165 {
26166 while (n--)
26167 {
26168 result <<= 8;
26169 result |= (where[n] & 255);
26170 }
26171 }
26172
26173 return result;
26174 }
26175
26176 /* MD interface: Sections. */
26177
26178 /* Calculate the maximum variable size (i.e., excluding fr_fix)
26179 that an rs_machine_dependent frag may reach. */
26180
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: a narrow encoding grows to a full 32-bit instruction.  */
  return INSN_SIZE;
}
26197
26198 /* Estimate the size of a frag before relaxing. Assume everything fits in
26199 2 bytes. */
26200
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start with the narrow (2-byte) encoding; arm_relax_frag widens the
     frag to 4 bytes later if the operand turns out not to fit.  */
  fragp->fr_var = 2;
  return 2;
}
26208
26209 /* Convert a machine dependent frag. */
26210
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* OLD_OP is the narrow (16-bit) instruction emitted when the frag was
     created; its register fields are transplanted into the wide encoding
     below whenever relaxation settled on fr_var == 4.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* Loads/stores: either widen to the 32-bit encoding or keep the
	 16-bit form, and pick the matching offset relocation.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Top-nibble values 4 and 9 are treated as forms that keep the
	     register in bits 8-10; the others carry Rt in bits 0-2 and
	     Rn in bits 3-5 — NOTE(review): matches the Thumb encoding
	     tables, confirm there.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the second PC-relative load form is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Fold the short form's 4-byte PC bias into the addend.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* The register field lands in a different position for mov/movs
	     than for cmp/cmn, hence the extra shift of 8 for the latter.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: wide form uses the 25-bit reloc, narrow the
	 12-bit one.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes which T32 immediate relocation the
	     resulting encoding needs — NOTE(review): presumably the
	     flag-setting variant; confirm against the encoding tables.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Attach the fixup that will fill in the offset/immediate field.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26382
26383 /* Return the size of a relaxable immediate operand instruction.
26384 SHIFT and SIZE specify the form of the allowable immediate. */
26385 static int
26386 relax_immediate (fragS *fragp, int size, int shift)
26387 {
26388 offsetT offset;
26389 offsetT mask;
26390 offsetT low;
26391
26392 /* ??? Should be able to do better than this. */
26393 if (fragp->fr_symbol)
26394 return 4;
26395
26396 low = (1 << shift) - 1;
26397 mask = (1 << (shift + size)) - (1 << shift);
26398 offset = fragp->fr_offset;
26399 /* Force misaligned offsets to 32-bit variant. */
26400 if (offset & low)
26401 return 4;
26402 if (offset & ~mask)
26403 return 4;
26404 return 2;
26405 }
26406
26407 /* Get the address of a symbol during relaxation. */
/* Get the address of a symbol during relaxation.  STRETCH is the amount
   by which frags processed so far in this pass have grown or shrunk.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary the intervening frag enforces.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed by alignment padding: no movement.  */
	      if (stretch == 0)
		break;
	    }
	}
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
26456
26457 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
26458 load. */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base for the short form is the word-aligned PC value.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The short form only reaches forward, up to 1020 bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
26483
26484 /* Return the size of a relaxable add/sub immediate instruction. */
26485 static int
26486 relax_addsub (fragS *fragp, asection *sec)
26487 {
26488 char *buf;
26489 int op;
26490
26491 buf = fragp->fr_literal + fragp->fr_fix;
26492 op = bfd_get_16(sec->owner, buf);
26493 if ((op & 0xf) == ((op >> 4) & 0xf))
26494 return relax_immediate (fragp, 8, 0);
26495 else
26496 return relax_immediate (fragp, 3, 0);
26497 }
26498
26499 /* Return TRUE iff the definition of symbol S could be pre-empted
26500 (overridden) at link or load time. */
26501 static bfd_boolean
26502 symbol_preemptible (symbolS *s)
26503 {
26504 /* Weak symbols can always be pre-empted. */
26505 if (S_IS_WEAK (s))
26506 return TRUE;
26507
26508 /* Non-global symbols cannot be pre-empted. */
26509 if (! S_IS_EXTERNAL (s))
26510 return FALSE;
26511
26512 #ifdef OBJ_ELF
26513 /* In ELF, a global symbol can be marked protected, or private. In that
26514 case it can't be pre-empted (other definitions in the same link unit
26515 would violate the ODR). */
26516 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
26517 return FALSE;
26518 #endif
26519
26520 /* Other global symbols might be pre-empted. */
26521 return TRUE;
26522 }
26523
26524 /* Return the size of a relaxable branch instruction. BITS is the
26525 size of the offset field in the narrow instruction. */
26526
static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A pre-emptible target may be resolved elsewhere at link time, so the
     narrow reach cannot be relied on.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to PC, i.e. this insn's address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
26560
26561
26562 /* Relax a machine dependent frag. This returns the amount by which
26563 the current size of the frag should change. */
26564
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Each relaxable mnemonic gets its own range check; the helpers return
     the encoding size (2 or 4) the operand currently requires.  The
     (size, shift) pairs describe the narrow form's immediate field.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
26640
26641 /* Round up a section size to the appropriate boundary. */
26642
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  /* No extra padding is required for ARM sections; return SIZE as-is.  */
  return size;
}
26649
26650 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
26651 of an rs_align_code fragment. */
26652
void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Select the no-op pattern matching the recorded ARM/Thumb mode.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  /* Thumb-2: pad mostly with wide no-ops, using at most one
	     narrow no-op to fix up a 2-byte remainder.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Leading bytes that cannot hold a whole no-op are zero-filled and
     marked as data via a mapping symbol.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
26769
26770 /* Called from md_do_align. Used to create an alignment
26771 frag in a code section. */
26772
26773 void
26774 arm_frag_align_code (int n, int max)
26775 {
26776 char * p;
26777
26778 /* We assume that there will never be a requirement
26779 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
26780 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
26781 {
26782 char err_msg[128];
26783
26784 sprintf (err_msg,
26785 _("alignments greater than %d bytes not supported in .text sections."),
26786 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
26787 as_fatal ("%s", err_msg);
26788 }
26789
26790 p = frag_var (rs_align_code,
26791 MAX_MEM_FOR_RS_ALIGN_CODE,
26792 1,
26793 (relax_substateT) max,
26794 (symbolS *) NULL,
26795 (offsetT) n,
26796 (char *) NULL);
26797 *p = 0;
26798 }
26799
26800 /* Perform target specific initialisation of a frag.
26801 Note - despite the name this initialisation is not done when the frag
26802 is created, but only when its type is assigned. A frag can be created
26803 and used a long time before its type is set, so beware of assuming that
26804 this initialisation is performed first. */
26805
26806 #ifndef OBJ_ELF
/* Non-ELF targets: nothing to do beyond remembering the current mode.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
26813
26814 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED marker, leaving just the mode flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fills count as data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
26848
26849 /* When we change sections we need to issue a new mapping symbol. */
26850
/* Called on every section change.  Ensures an exception index table
   section gets its link set up; presumably so that linkers can associate
   the exidx entries with the code they describe — TODO(review): confirm
   against the ARM EHABI requirements.  */
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
26859
26860 int
26861 arm_elf_section_type (const char * str, size_t len)
26862 {
26863 if (len == 5 && strncmp (str, "exidx", 5) == 0)
26864 return SHT_ARM_EXIDX;
26865
26866 return -1;
26867 }
26868 \f
26869 /* Code to deal with unwinding tables. */
26870
26871 static void add_unwind_adjustsp (offsetT);
26872
26873 /* Generate any deferred unwind frame offset. */
26874
26875 static void
26876 flush_pending_unwind (void)
26877 {
26878 offsetT offset;
26879
26880 offset = unwind.pending_offset;
26881 unwind.pending_offset = 0;
26882 if (offset != 0)
26883 add_unwind_adjustsp (offset);
26884 }
26885
26886 /* Add an opcode to this list for this function. Two-byte opcodes should
26887 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
26888 order. */
26889
26890 static void
26891 add_unwind_opcode (valueT op, int length)
26892 {
26893 /* Add any deferred stack adjustment. */
26894 if (unwind.pending_offset)
26895 flush_pending_unwind ();
26896
26897 unwind.sp_restored = 0;
26898
26899 if (unwind.opcode_count + length > unwind.opcode_alloc)
26900 {
26901 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
26902 if (unwind.opcodes)
26903 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
26904 unwind.opcode_alloc);
26905 else
26906 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
26907 }
26908 while (length > 0)
26909 {
26910 length--;
26911 unwind.opcodes[unwind.opcode_count] = op & 0xff;
26912 op >>= 8;
26913 unwind.opcode_count++;
26914 }
26915 }
26916
26917 /* Add unwind opcodes to adjust the stack pointer. */
26918
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets grow the vsp; the opcode byte values follow the ARM
   EHABI unwind-instruction encodings.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      /* 0x3f is the maximal single-opcode increment (0x100 bytes).  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (maximal decrement) opcodes until
	 the remainder fits a single 0x40-0x7f decrement opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
26978
26979 /* Finish the list of unwind opcodes for this function. */
26980
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      /* 0x90 | reg encodes "set vsp from register" — see the EHABI
	 unwind-instruction table.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
26999
27000
27001 /* Start an exception table entry. If idx is nonzero this is an index table
27002 entry. */
27003
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches the current segment to the (possibly newly created)
   unwind section paired with TEXT_SEG.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables use the SHT_ARM_EXIDX section type; unwind info tables
     are plain PROGBITS.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* Derive the unwind section name from the text section name: ".text"
     maps to the bare prefix, a linkonce text section to the linkonce
     prefix plus its tail.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
27068
27069
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data (a handlerdata directive follows).  Returns zero
   after emitting an out-of-line table entry, or the index table value for
   an inline entry (including 1 for EXIDX_CANTUNWIND).  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Personality routine 0 can hold at most three opcode bytes.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  The opcodes
		 are stored in reverse order, so pack them MSB-first while
		 walking the array backwards.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the opcode byte count up to whole 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  N counts bytes still free in the current word.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27238
27239
/* Initialize the DWARF-2 unwind information for this procedure:
   on function entry the CFA is the stack pointer with offset zero.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27247 #endif /* OBJ_ELF */
27248
27249 /* Convert REGNAME to a DWARF-2 register number. */
27250
27251 int
27252 tc_arm_regname_to_dw2regnum (char *regname)
27253 {
27254 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27255 if (reg != FAIL)
27256 return reg;
27257
27258 /* PR 16694: Allow VFP registers as well. */
27259 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27260 if (reg != FAIL)
27261 return 64 + reg;
27262
27263 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27264 if (reg != FAIL)
27265 return reg + 256;
27266
27267 return FAIL;
27268 }
27269
27270 #ifdef TE_PE
27271 void
27272 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
27273 {
27274 expressionS exp;
27275
27276 exp.X_op = O_secrel;
27277 exp.X_add_symbol = symbol;
27278 exp.X_add_number = 0;
27279 emit_expr (&exp, size);
27280 }
27281 #endif
27282
27283 /* MD interface: Symbol and relocation handling. */
27284
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb loads: pipeline offset of 4, then Align(PC, 4).  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* If the target is a same-section ARM function and v5T (BLX) is
	 available, restore the raw instruction address (undoing the
	 "base = 0" above) so the offset is computed locally.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27418
27419 static bfd_boolean flag_warn_syms = TRUE;
27420
27421 bfd_boolean
27422 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27423 {
27424 /* PR 18347 - Warn if the user attempts to create a symbol with the same
27425 name as an ARM instruction. Whilst strictly speaking it is allowed, it
27426 does mean that the resulting code might be very confusing to the reader.
27427 Also this warning can be triggered if the user omits an operand before
27428 an immediate address, eg:
27429
27430 LDR =foo
27431
27432 GAS treats this as an assignment of the value of the symbol foo to a
27433 symbol LDR, and so (without this code) it will not issue any kind of
27434 warning or error message.
27435
27436 Note - ARM instructions are case-insensitive but the strings in the hash
27437 table are all stored in lower case, so we must first ensure that name is
27438 lower case too. */
27439 if (flag_warn_syms && arm_ops_hsh)
27440 {
27441 char * nbuf = strdup (name);
27442 char * p;
27443
27444 for (p = nbuf; *p; p++)
27445 *p = TOLOWER (*p);
27446 if (hash_find (arm_ops_hsh, nbuf) != NULL)
27447 {
27448 static struct hash_control * already_warned = NULL;
27449
27450 if (already_warned == NULL)
27451 already_warned = hash_new ();
27452 /* Only warn about the symbol once. To keep the code
27453 simple we let hash_insert do the lookup for us. */
27454 if (hash_insert (already_warned, nbuf, NULL) == NULL)
27455 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
27456 }
27457 else
27458 free (nbuf);
27459 }
27460
27461 return FALSE;
27462 }
27463
27464 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
27465 Otherwise we have no need to default values of symbols. */
27466
27467 symbolS *
27468 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
27469 {
27470 #ifdef OBJ_ELF
27471 if (name[0] == '_' && name[1] == 'G'
27472 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
27473 {
27474 if (!GOT_symbol)
27475 {
27476 if (symbol_find (name))
27477 as_bad (_("GOT already in the symbol table"));
27478
27479 GOT_symbol = symbol_new (name, undefined_section,
27480 (valueT) 0, & zero_address_frag);
27481 }
27482
27483 return GOT_symbol;
27484 }
27485 #endif
27486
27487 return NULL;
27488 }
27489
27490 /* Subroutine of md_apply_fix. Check to see if an immediate can be
27491 computed as two separate immediate values, added together. We
27492 already know that this value cannot be computed by just one ARM
27493 instruction. */
27494
27495 static unsigned int
27496 validate_immediate_twopart (unsigned int val,
27497 unsigned int * highpart)
27498 {
27499 unsigned int a;
27500 unsigned int i;
27501
27502 for (i = 0; i < 32; i += 2)
27503 if (((a = rotate_left (val, i)) & 0xff) != 0)
27504 {
27505 if (a & 0xff00)
27506 {
27507 if (a & ~ 0xffff)
27508 continue;
27509 * highpart = (a >> 8) | ((i + 24) << 7);
27510 }
27511 else if (a & 0xff0000)
27512 {
27513 if (a & 0xff000000)
27514 continue;
27515 * highpart = (a >> 16) | ((i + 16) << 7);
27516 }
27517 else
27518 {
27519 gas_assert (a & 0xff000000);
27520 * highpart = (a >> 24) | ((i + 8) << 7);
27521 }
27522
27523 return (a & 0xff) | (i << 7);
27524 }
27525
27526 return FAIL;
27527 }
27528
27529 static int
27530 validate_offset_imm (unsigned int val, int hwse)
27531 {
27532 if ((hwse && val > 255) || val > 4095)
27533 return FAIL;
27534 return val;
27535 }
27536
27537 /* Subroutine of md_apply_fix. Do those data_ops which can take a
27538 negative immediate constant by altering the instruction. A bit of
27539 a hack really.
27540 MOV <-> MVN
27541 AND <-> BIC
27542 ADC <-> SBC
27543 by inverting the second operand, and
27544 ADD <-> SUB
27545 CMP <-> CMN
27546 by negating the second operand. */
27547
27548 static int
27549 negate_data_op (unsigned long * instruction,
27550 unsigned long value)
27551 {
27552 int op, new_inst;
27553 unsigned long negated, inverted;
27554
27555 negated = encode_arm_immediate (-value);
27556 inverted = encode_arm_immediate (~value);
27557
27558 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
27559 switch (op)
27560 {
27561 /* First negates. */
27562 case OPCODE_SUB: /* ADD <-> SUB */
27563 new_inst = OPCODE_ADD;
27564 value = negated;
27565 break;
27566
27567 case OPCODE_ADD:
27568 new_inst = OPCODE_SUB;
27569 value = negated;
27570 break;
27571
27572 case OPCODE_CMP: /* CMP <-> CMN */
27573 new_inst = OPCODE_CMN;
27574 value = negated;
27575 break;
27576
27577 case OPCODE_CMN:
27578 new_inst = OPCODE_CMP;
27579 value = negated;
27580 break;
27581
27582 /* Now Inverted ops. */
27583 case OPCODE_MOV: /* MOV <-> MVN */
27584 new_inst = OPCODE_MVN;
27585 value = inverted;
27586 break;
27587
27588 case OPCODE_MVN:
27589 new_inst = OPCODE_MOV;
27590 value = inverted;
27591 break;
27592
27593 case OPCODE_AND: /* AND <-> BIC */
27594 new_inst = OPCODE_BIC;
27595 value = inverted;
27596 break;
27597
27598 case OPCODE_BIC:
27599 new_inst = OPCODE_AND;
27600 value = inverted;
27601 break;
27602
27603 case OPCODE_ADC: /* ADC <-> SBC */
27604 new_inst = OPCODE_SBC;
27605 value = inverted;
27606 break;
27607
27608 case OPCODE_SBC:
27609 new_inst = OPCODE_ADC;
27610 value = inverted;
27611 break;
27612
27613 /* We cannot do anything. */
27614 default:
27615 return FAIL;
27616 }
27617
27618 if (value == (unsigned) FAIL)
27619 return FAIL;
27620
27621 *instruction &= OPCODE_MASK;
27622 *instruction |= new_inst << DATA_OP_SHIFT;
27623 return value;
27624 }
27625
27626 /* Like negate_data_op, but for Thumb-2. */
27627
27628 static unsigned int
27629 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
27630 {
27631 int op, new_inst;
27632 int rd;
27633 unsigned int negated, inverted;
27634
27635 negated = encode_thumb32_immediate (-value);
27636 inverted = encode_thumb32_immediate (~value);
27637
27638 rd = (*instruction >> 8) & 0xf;
27639 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
27640 switch (op)
27641 {
27642 /* ADD <-> SUB. Includes CMP <-> CMN. */
27643 case T2_OPCODE_SUB:
27644 new_inst = T2_OPCODE_ADD;
27645 value = negated;
27646 break;
27647
27648 case T2_OPCODE_ADD:
27649 new_inst = T2_OPCODE_SUB;
27650 value = negated;
27651 break;
27652
27653 /* ORR <-> ORN. Includes MOV <-> MVN. */
27654 case T2_OPCODE_ORR:
27655 new_inst = T2_OPCODE_ORN;
27656 value = inverted;
27657 break;
27658
27659 case T2_OPCODE_ORN:
27660 new_inst = T2_OPCODE_ORR;
27661 value = inverted;
27662 break;
27663
27664 /* AND <-> BIC. TST has no inverted equivalent. */
27665 case T2_OPCODE_AND:
27666 new_inst = T2_OPCODE_BIC;
27667 if (rd == 15)
27668 value = FAIL;
27669 else
27670 value = inverted;
27671 break;
27672
27673 case T2_OPCODE_BIC:
27674 new_inst = T2_OPCODE_AND;
27675 value = inverted;
27676 break;
27677
27678 /* ADC <-> SBC */
27679 case T2_OPCODE_ADC:
27680 new_inst = T2_OPCODE_SBC;
27681 value = inverted;
27682 break;
27683
27684 case T2_OPCODE_SBC:
27685 new_inst = T2_OPCODE_ADC;
27686 value = inverted;
27687 break;
27688
27689 /* We cannot do anything. */
27690 default:
27691 return FAIL;
27692 }
27693
27694 if (value == (unsigned int)FAIL)
27695 return FAIL;
27696
27697 *instruction &= T2_OPCODE_MASK;
27698 *instruction |= new_inst << T2_DATA_OP_SHIFT;
27699 return value;
27700 }
27701
27702 /* Read a 32-bit thumb instruction from buf. */
27703
27704 static unsigned long
27705 get_thumb32_insn (char * buf)
27706 {
27707 unsigned long insn;
27708 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
27709 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27710
27711 return insn;
27712 }
27713
27714 /* We usually want to set the low bit on the address of thumb function
27715 symbols. In particular .word foo - . should have the low bit set.
27716 Generic code tries to fold the difference of two symbols to
27717 a constant. Prevent this and force a relocation when the first symbols
27718 is a thumb function. */
27719
27720 bfd_boolean
27721 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
27722 {
27723 if (op == O_subtract
27724 && l->X_op == O_symbol
27725 && r->X_op == O_symbol
27726 && THUMB_IS_FUNC (l->X_add_symbol))
27727 {
27728 l->X_op = O_subtract;
27729 l->X_op_symbol = r->X_add_symbol;
27730 l->X_add_number -= r->X_add_number;
27731 return TRUE;
27732 }
27733
27734 /* Process as normal. */
27735 return FALSE;
27736 }
27737
27738 /* Encode Thumb2 unconditional branches and calls. The encoding
27739 for the 2 are identical for the immediate values. */
27740
27741 static void
27742 encode_thumb2_b_bl_offset (char * buf, offsetT value)
27743 {
27744 #define T2I1I2MASK ((1 << 13) | (1 << 11))
27745 offsetT newval;
27746 offsetT newval2;
27747 addressT S, I1, I2, lo, hi;
27748
27749 S = (value >> 24) & 0x01;
27750 I1 = (value >> 23) & 0x01;
27751 I2 = (value >> 22) & 0x01;
27752 hi = (value >> 12) & 0x3ff;
27753 lo = (value >> 1) & 0x7ff;
27754 newval = md_chars_to_number (buf, THUMB_SIZE);
27755 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
27756 newval |= (S << 10) | hi;
27757 newval2 &= ~T2I1I2MASK;
27758 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
27759 md_number_to_chars (buf, newval, THUMB_SIZE);
27760 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
27761 }
27762
27763 void
27764 md_apply_fix (fixS * fixP,
27765 valueT * valP,
27766 segT seg)
27767 {
27768 offsetT value = * valP;
27769 offsetT newval;
27770 unsigned int newimm;
27771 unsigned long temp;
27772 int sign;
27773 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
27774
27775 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
27776
27777 /* Note whether this will delete the relocation. */
27778
27779 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
27780 fixP->fx_done = 1;
27781
27782 /* On a 64-bit host, silently truncate 'value' to 32 bits for
27783 consistency with the behaviour on 32-bit hosts. Remember value
27784 for emit_reloc. */
27785 value &= 0xffffffff;
27786 value ^= 0x80000000;
27787 value -= 0x80000000;
27788
27789 *valP = value;
27790 fixP->fx_addnumber = value;
27791
27792 /* Same treatment for fixP->fx_offset. */
27793 fixP->fx_offset &= 0xffffffff;
27794 fixP->fx_offset ^= 0x80000000;
27795 fixP->fx_offset -= 0x80000000;
27796
27797 switch (fixP->fx_r_type)
27798 {
27799 case BFD_RELOC_NONE:
27800 /* This will need to go in the object file. */
27801 fixP->fx_done = 0;
27802 break;
27803
27804 case BFD_RELOC_ARM_IMMEDIATE:
27805 /* We claim that this fixup has been processed here,
27806 even if in fact we generate an error because we do
27807 not have a reloc for it, so tc_gen_reloc will reject it. */
27808 fixP->fx_done = 1;
27809
27810 if (fixP->fx_addsy)
27811 {
27812 const char *msg = 0;
27813
27814 if (! S_IS_DEFINED (fixP->fx_addsy))
27815 msg = _("undefined symbol %s used as an immediate value");
27816 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27817 msg = _("symbol %s is in a different section");
27818 else if (S_IS_WEAK (fixP->fx_addsy))
27819 msg = _("symbol %s is weak and may be overridden later");
27820
27821 if (msg)
27822 {
27823 as_bad_where (fixP->fx_file, fixP->fx_line,
27824 msg, S_GET_NAME (fixP->fx_addsy));
27825 break;
27826 }
27827 }
27828
27829 temp = md_chars_to_number (buf, INSN_SIZE);
27830
27831 /* If the offset is negative, we should use encoding A2 for ADR. */
27832 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
27833 newimm = negate_data_op (&temp, value);
27834 else
27835 {
27836 newimm = encode_arm_immediate (value);
27837
27838 /* If the instruction will fail, see if we can fix things up by
27839 changing the opcode. */
27840 if (newimm == (unsigned int) FAIL)
27841 newimm = negate_data_op (&temp, value);
27842 /* MOV accepts both ARM modified immediate (A1 encoding) and
27843 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
27844 When disassembling, MOV is preferred when there is no encoding
27845 overlap. */
27846 if (newimm == (unsigned int) FAIL
27847 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
27848 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
27849 && !((temp >> SBIT_SHIFT) & 0x1)
27850 && value >= 0 && value <= 0xffff)
27851 {
27852 /* Clear bits[23:20] to change encoding from A1 to A2. */
27853 temp &= 0xff0fffff;
27854 /* Encoding high 4bits imm. Code below will encode the remaining
27855 low 12bits. */
27856 temp |= (value & 0x0000f000) << 4;
27857 newimm = value & 0x00000fff;
27858 }
27859 }
27860
27861 if (newimm == (unsigned int) FAIL)
27862 {
27863 as_bad_where (fixP->fx_file, fixP->fx_line,
27864 _("invalid constant (%lx) after fixup"),
27865 (unsigned long) value);
27866 break;
27867 }
27868
27869 newimm |= (temp & 0xfffff000);
27870 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27871 break;
27872
27873 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
27874 {
27875 unsigned int highpart = 0;
27876 unsigned int newinsn = 0xe1a00000; /* nop. */
27877
27878 if (fixP->fx_addsy)
27879 {
27880 const char *msg = 0;
27881
27882 if (! S_IS_DEFINED (fixP->fx_addsy))
27883 msg = _("undefined symbol %s used as an immediate value");
27884 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
27885 msg = _("symbol %s is in a different section");
27886 else if (S_IS_WEAK (fixP->fx_addsy))
27887 msg = _("symbol %s is weak and may be overridden later");
27888
27889 if (msg)
27890 {
27891 as_bad_where (fixP->fx_file, fixP->fx_line,
27892 msg, S_GET_NAME (fixP->fx_addsy));
27893 break;
27894 }
27895 }
27896
27897 newimm = encode_arm_immediate (value);
27898 temp = md_chars_to_number (buf, INSN_SIZE);
27899
27900 /* If the instruction will fail, see if we can fix things up by
27901 changing the opcode. */
27902 if (newimm == (unsigned int) FAIL
27903 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
27904 {
27905 /* No ? OK - try using two ADD instructions to generate
27906 the value. */
27907 newimm = validate_immediate_twopart (value, & highpart);
27908
27909 /* Yes - then make sure that the second instruction is
27910 also an add. */
27911 if (newimm != (unsigned int) FAIL)
27912 newinsn = temp;
27913 /* Still No ? Try using a negated value. */
27914 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
27915 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
27916 /* Otherwise - give up. */
27917 else
27918 {
27919 as_bad_where (fixP->fx_file, fixP->fx_line,
27920 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
27921 (long) value);
27922 break;
27923 }
27924
27925 /* Replace the first operand in the 2nd instruction (which
27926 is the PC) with the destination register. We have
27927 already added in the PC in the first instruction and we
27928 do not want to do it again. */
27929 newinsn &= ~ 0xf0000;
27930 newinsn |= ((newinsn & 0x0f000) << 4);
27931 }
27932
27933 newimm |= (temp & 0xfffff000);
27934 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
27935
27936 highpart |= (newinsn & 0xfffff000);
27937 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
27938 }
27939 break;
27940
27941 case BFD_RELOC_ARM_OFFSET_IMM:
27942 if (!fixP->fx_done && seg->use_rela_p)
27943 value = 0;
27944 /* Fall through. */
27945
27946 case BFD_RELOC_ARM_LITERAL:
27947 sign = value > 0;
27948
27949 if (value < 0)
27950 value = - value;
27951
27952 if (validate_offset_imm (value, 0) == FAIL)
27953 {
27954 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
27955 as_bad_where (fixP->fx_file, fixP->fx_line,
27956 _("invalid literal constant: pool needs to be closer"));
27957 else
27958 as_bad_where (fixP->fx_file, fixP->fx_line,
27959 _("bad immediate value for offset (%ld)"),
27960 (long) value);
27961 break;
27962 }
27963
27964 newval = md_chars_to_number (buf, INSN_SIZE);
27965 if (value == 0)
27966 newval &= 0xfffff000;
27967 else
27968 {
27969 newval &= 0xff7ff000;
27970 newval |= value | (sign ? INDEX_UP : 0);
27971 }
27972 md_number_to_chars (buf, newval, INSN_SIZE);
27973 break;
27974
27975 case BFD_RELOC_ARM_OFFSET_IMM8:
27976 case BFD_RELOC_ARM_HWLITERAL:
27977 sign = value > 0;
27978
27979 if (value < 0)
27980 value = - value;
27981
27982 if (validate_offset_imm (value, 1) == FAIL)
27983 {
27984 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
27985 as_bad_where (fixP->fx_file, fixP->fx_line,
27986 _("invalid literal constant: pool needs to be closer"));
27987 else
27988 as_bad_where (fixP->fx_file, fixP->fx_line,
27989 _("bad immediate value for 8-bit offset (%ld)"),
27990 (long) value);
27991 break;
27992 }
27993
27994 newval = md_chars_to_number (buf, INSN_SIZE);
27995 if (value == 0)
27996 newval &= 0xfffff0f0;
27997 else
27998 {
27999 newval &= 0xff7ff0f0;
28000 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
28001 }
28002 md_number_to_chars (buf, newval, INSN_SIZE);
28003 break;
28004
28005 case BFD_RELOC_ARM_T32_OFFSET_U8:
28006 if (value < 0 || value > 1020 || value % 4 != 0)
28007 as_bad_where (fixP->fx_file, fixP->fx_line,
28008 _("bad immediate value for offset (%ld)"), (long) value);
28009 value /= 4;
28010
28011 newval = md_chars_to_number (buf+2, THUMB_SIZE);
28012 newval |= value;
28013 md_number_to_chars (buf+2, newval, THUMB_SIZE);
28014 break;
28015
28016 case BFD_RELOC_ARM_T32_OFFSET_IMM:
28017 /* This is a complicated relocation used for all varieties of Thumb32
28018 load/store instruction with immediate offset:
28019
28020 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28021 *4, optional writeback(W)
28022 (doubleword load/store)
28023
28024 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28025 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28026 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28027 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28028 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28029
28030 Uppercase letters indicate bits that are already encoded at
28031 this point. Lowercase letters are our problem. For the
28032 second block of instructions, the secondary opcode nybble
28033 (bits 8..11) is present, and bit 23 is zero, even if this is
28034 a PC-relative operation. */
28035 newval = md_chars_to_number (buf, THUMB_SIZE);
28036 newval <<= 16;
28037 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
28038
28039 if ((newval & 0xf0000000) == 0xe0000000)
28040 {
28041 /* Doubleword load/store: 8-bit offset, scaled by 4. */
28042 if (value >= 0)
28043 newval |= (1 << 23);
28044 else
28045 value = -value;
28046 if (value % 4 != 0)
28047 {
28048 as_bad_where (fixP->fx_file, fixP->fx_line,
28049 _("offset not a multiple of 4"));
28050 break;
28051 }
28052 value /= 4;
28053 if (value > 0xff)
28054 {
28055 as_bad_where (fixP->fx_file, fixP->fx_line,
28056 _("offset out of range"));
28057 break;
28058 }
28059 newval &= ~0xff;
28060 }
28061 else if ((newval & 0x000f0000) == 0x000f0000)
28062 {
28063 /* PC-relative, 12-bit offset. */
28064 if (value >= 0)
28065 newval |= (1 << 23);
28066 else
28067 value = -value;
28068 if (value > 0xfff)
28069 {
28070 as_bad_where (fixP->fx_file, fixP->fx_line,
28071 _("offset out of range"));
28072 break;
28073 }
28074 newval &= ~0xfff;
28075 }
28076 else if ((newval & 0x00000100) == 0x00000100)
28077 {
28078 /* Writeback: 8-bit, +/- offset. */
28079 if (value >= 0)
28080 newval |= (1 << 9);
28081 else
28082 value = -value;
28083 if (value > 0xff)
28084 {
28085 as_bad_where (fixP->fx_file, fixP->fx_line,
28086 _("offset out of range"));
28087 break;
28088 }
28089 newval &= ~0xff;
28090 }
28091 else if ((newval & 0x00000f00) == 0x00000e00)
28092 {
28093 /* T-instruction: positive 8-bit offset. */
28094 if (value < 0 || value > 0xff)
28095 {
28096 as_bad_where (fixP->fx_file, fixP->fx_line,
28097 _("offset out of range"));
28098 break;
28099 }
28100 newval &= ~0xff;
28101 newval |= value;
28102 }
28103 else
28104 {
28105 /* Positive 12-bit or negative 8-bit offset. */
28106 int limit;
28107 if (value >= 0)
28108 {
28109 newval |= (1 << 23);
28110 limit = 0xfff;
28111 }
28112 else
28113 {
28114 value = -value;
28115 limit = 0xff;
28116 }
28117 if (value > limit)
28118 {
28119 as_bad_where (fixP->fx_file, fixP->fx_line,
28120 _("offset out of range"));
28121 break;
28122 }
28123 newval &= ~limit;
28124 }
28125
28126 newval |= value;
28127 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
28128 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
28129 break;
28130
28131 case BFD_RELOC_ARM_SHIFT_IMM:
28132 newval = md_chars_to_number (buf, INSN_SIZE);
28133 if (((unsigned long) value) > 32
28134 || (value == 32
28135 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
28136 {
28137 as_bad_where (fixP->fx_file, fixP->fx_line,
28138 _("shift expression is too large"));
28139 break;
28140 }
28141
28142 if (value == 0)
28143 /* Shifts of zero must be done as lsl. */
28144 newval &= ~0x60;
28145 else if (value == 32)
28146 value = 0;
28147 newval &= 0xfffff07f;
28148 newval |= (value & 0x1f) << 7;
28149 md_number_to_chars (buf, newval, INSN_SIZE);
28150 break;
28151
28152 case BFD_RELOC_ARM_T32_IMMEDIATE:
28153 case BFD_RELOC_ARM_T32_ADD_IMM:
28154 case BFD_RELOC_ARM_T32_IMM12:
28155 case BFD_RELOC_ARM_T32_ADD_PC12:
28156 /* We claim that this fixup has been processed here,
28157 even if in fact we generate an error because we do
28158 not have a reloc for it, so tc_gen_reloc will reject it. */
28159 fixP->fx_done = 1;
28160
28161 if (fixP->fx_addsy
28162 && ! S_IS_DEFINED (fixP->fx_addsy))
28163 {
28164 as_bad_where (fixP->fx_file, fixP->fx_line,
28165 _("undefined symbol %s used as an immediate value"),
28166 S_GET_NAME (fixP->fx_addsy));
28167 break;
28168 }
28169
28170 newval = md_chars_to_number (buf, THUMB_SIZE);
28171 newval <<= 16;
28172 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28173
28174 newimm = FAIL;
28175 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28176 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28177 Thumb2 modified immediate encoding (T2). */
28178 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28179 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28180 {
28181 newimm = encode_thumb32_immediate (value);
28182 if (newimm == (unsigned int) FAIL)
28183 newimm = thumb32_negate_data_op (&newval, value);
28184 }
28185 if (newimm == (unsigned int) FAIL)
28186 {
28187 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28188 {
28189 /* Turn add/sum into addw/subw. */
28190 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28191 newval = (newval & 0xfeffffff) | 0x02000000;
28192 /* No flat 12-bit imm encoding for addsw/subsw. */
28193 if ((newval & 0x00100000) == 0)
28194 {
28195 /* 12 bit immediate for addw/subw. */
28196 if (value < 0)
28197 {
28198 value = -value;
28199 newval ^= 0x00a00000;
28200 }
28201 if (value > 0xfff)
28202 newimm = (unsigned int) FAIL;
28203 else
28204 newimm = value;
28205 }
28206 }
28207 else
28208 {
28209 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28210 UINT16 (T3 encoding), MOVW only accepts UINT16. When
28211 disassembling, MOV is preferred when there is no encoding
28212 overlap. */
28213 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28214 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28215 but with the Rn field [19:16] set to 1111. */
28216 && (((newval >> 16) & 0xf) == 0xf)
28217 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28218 && !((newval >> T2_SBIT_SHIFT) & 0x1)
28219 && value >= 0 && value <= 0xffff)
28220 {
28221 /* Toggle bit[25] to change encoding from T2 to T3. */
28222 newval ^= 1 << 25;
28223 /* Clear bits[19:16]. */
28224 newval &= 0xfff0ffff;
28225 /* Encoding high 4bits imm. Code below will encode the
28226 remaining low 12bits. */
28227 newval |= (value & 0x0000f000) << 4;
28228 newimm = value & 0x00000fff;
28229 }
28230 }
28231 }
28232
28233 if (newimm == (unsigned int)FAIL)
28234 {
28235 as_bad_where (fixP->fx_file, fixP->fx_line,
28236 _("invalid constant (%lx) after fixup"),
28237 (unsigned long) value);
28238 break;
28239 }
28240
28241 newval |= (newimm & 0x800) << 15;
28242 newval |= (newimm & 0x700) << 4;
28243 newval |= (newimm & 0x0ff);
28244
28245 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28246 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28247 break;
28248
28249 case BFD_RELOC_ARM_SMC:
28250 if (((unsigned long) value) > 0xf)
28251 as_bad_where (fixP->fx_file, fixP->fx_line,
28252 _("invalid smc expression"));
28253
28254 newval = md_chars_to_number (buf, INSN_SIZE);
28255 newval |= (value & 0xf);
28256 md_number_to_chars (buf, newval, INSN_SIZE);
28257 break;
28258
28259 case BFD_RELOC_ARM_HVC:
28260 if (((unsigned long) value) > 0xffff)
28261 as_bad_where (fixP->fx_file, fixP->fx_line,
28262 _("invalid hvc expression"));
28263 newval = md_chars_to_number (buf, INSN_SIZE);
28264 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28265 md_number_to_chars (buf, newval, INSN_SIZE);
28266 break;
28267
28268 case BFD_RELOC_ARM_SWI:
28269 if (fixP->tc_fix_data != 0)
28270 {
28271 if (((unsigned long) value) > 0xff)
28272 as_bad_where (fixP->fx_file, fixP->fx_line,
28273 _("invalid swi expression"));
28274 newval = md_chars_to_number (buf, THUMB_SIZE);
28275 newval |= value;
28276 md_number_to_chars (buf, newval, THUMB_SIZE);
28277 }
28278 else
28279 {
28280 if (((unsigned long) value) > 0x00ffffff)
28281 as_bad_where (fixP->fx_file, fixP->fx_line,
28282 _("invalid swi expression"));
28283 newval = md_chars_to_number (buf, INSN_SIZE);
28284 newval |= value;
28285 md_number_to_chars (buf, newval, INSN_SIZE);
28286 }
28287 break;
28288
28289 case BFD_RELOC_ARM_MULTI:
28290 if (((unsigned long) value) > 0xffff)
28291 as_bad_where (fixP->fx_file, fixP->fx_line,
28292 _("invalid expression in load/store multiple"));
28293 newval = value | md_chars_to_number (buf, INSN_SIZE);
28294 md_number_to_chars (buf, newval, INSN_SIZE);
28295 break;
28296
28297 #ifdef OBJ_ELF
28298 case BFD_RELOC_ARM_PCREL_CALL:
28299
28300 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28301 && fixP->fx_addsy
28302 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28303 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28304 && THUMB_IS_FUNC (fixP->fx_addsy))
28305 /* Flip the bl to blx. This is a simple flip
28306 bit here because we generate PCREL_CALL for
28307 unconditional bls. */
28308 {
28309 newval = md_chars_to_number (buf, INSN_SIZE);
28310 newval = newval | 0x10000000;
28311 md_number_to_chars (buf, newval, INSN_SIZE);
28312 temp = 1;
28313 fixP->fx_done = 1;
28314 }
28315 else
28316 temp = 3;
28317 goto arm_branch_common;
28318
28319 case BFD_RELOC_ARM_PCREL_JUMP:
28320 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28321 && fixP->fx_addsy
28322 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28323 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28324 && THUMB_IS_FUNC (fixP->fx_addsy))
28325 {
28326 /* This would map to a bl<cond>, b<cond>,
28327 b<always> to a Thumb function. We
28328 need to force a relocation for this particular
28329 case. */
28330 newval = md_chars_to_number (buf, INSN_SIZE);
28331 fixP->fx_done = 0;
28332 }
28333 /* Fall through. */
28334
28335 case BFD_RELOC_ARM_PLT32:
28336 #endif
28337 case BFD_RELOC_ARM_PCREL_BRANCH:
28338 temp = 3;
28339 goto arm_branch_common;
28340
28341 case BFD_RELOC_ARM_PCREL_BLX:
28342
28343 temp = 1;
28344 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28345 && fixP->fx_addsy
28346 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28347 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28348 && ARM_IS_FUNC (fixP->fx_addsy))
28349 {
28350 /* Flip the blx to a bl and warn. */
28351 const char *name = S_GET_NAME (fixP->fx_addsy);
28352 newval = 0xeb000000;
28353 as_warn_where (fixP->fx_file, fixP->fx_line,
28354 _("blx to '%s' an ARM ISA state function changed to bl"),
28355 name);
28356 md_number_to_chars (buf, newval, INSN_SIZE);
28357 temp = 3;
28358 fixP->fx_done = 1;
28359 }
28360
28361 #ifdef OBJ_ELF
28362 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
28363 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
28364 #endif
28365
28366 arm_branch_common:
28367 /* We are going to store value (shifted right by two) in the
28368 instruction, in a 24 bit, signed field. Bits 26 through 32 either
28369 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
28370 also be clear. */
28371 if (value & temp)
28372 as_bad_where (fixP->fx_file, fixP->fx_line,
28373 _("misaligned branch destination"));
28374 if ((value & (offsetT)0xfe000000) != (offsetT)0
28375 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
28376 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28377
28378 if (fixP->fx_done || !seg->use_rela_p)
28379 {
28380 newval = md_chars_to_number (buf, INSN_SIZE);
28381 newval |= (value >> 2) & 0x00ffffff;
28382 /* Set the H bit on BLX instructions. */
28383 if (temp == 1)
28384 {
28385 if (value & 2)
28386 newval |= 0x01000000;
28387 else
28388 newval &= ~0x01000000;
28389 }
28390 md_number_to_chars (buf, newval, INSN_SIZE);
28391 }
28392 break;
28393
28394 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
28395 /* CBZ can only branch forward. */
28396
28397 /* Attempts to use CBZ to branch to the next instruction
28398 (which, strictly speaking, are prohibited) will be turned into
28399 no-ops.
28400
28401 FIXME: It may be better to remove the instruction completely and
28402 perform relaxation. */
28403 if (value == -2)
28404 {
28405 newval = md_chars_to_number (buf, THUMB_SIZE);
28406 newval = 0xbf00; /* NOP encoding T1 */
28407 md_number_to_chars (buf, newval, THUMB_SIZE);
28408 }
28409 else
28410 {
28411 if (value & ~0x7e)
28412 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28413
28414 if (fixP->fx_done || !seg->use_rela_p)
28415 {
28416 newval = md_chars_to_number (buf, THUMB_SIZE);
28417 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
28418 md_number_to_chars (buf, newval, THUMB_SIZE);
28419 }
28420 }
28421 break;
28422
28423 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
28424 if (out_of_range_p (value, 8))
28425 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28426
28427 if (fixP->fx_done || !seg->use_rela_p)
28428 {
28429 newval = md_chars_to_number (buf, THUMB_SIZE);
28430 newval |= (value & 0x1ff) >> 1;
28431 md_number_to_chars (buf, newval, THUMB_SIZE);
28432 }
28433 break;
28434
28435 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
28436 if (out_of_range_p (value, 11))
28437 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28438
28439 if (fixP->fx_done || !seg->use_rela_p)
28440 {
28441 newval = md_chars_to_number (buf, THUMB_SIZE);
28442 newval |= (value & 0xfff) >> 1;
28443 md_number_to_chars (buf, newval, THUMB_SIZE);
28444 }
28445 break;
28446
28447 /* This relocation is misnamed, it should be BRANCH21. */
28448 case BFD_RELOC_THUMB_PCREL_BRANCH20:
28449 if (fixP->fx_addsy
28450 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28451 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28452 && ARM_IS_FUNC (fixP->fx_addsy)
28453 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28454 {
28455 /* Force a relocation for a branch 20 bits wide. */
28456 fixP->fx_done = 0;
28457 }
28458 if (out_of_range_p (value, 20))
28459 as_bad_where (fixP->fx_file, fixP->fx_line,
28460 _("conditional branch out of range"));
28461
28462 if (fixP->fx_done || !seg->use_rela_p)
28463 {
28464 offsetT newval2;
28465 addressT S, J1, J2, lo, hi;
28466
28467 S = (value & 0x00100000) >> 20;
28468 J2 = (value & 0x00080000) >> 19;
28469 J1 = (value & 0x00040000) >> 18;
28470 hi = (value & 0x0003f000) >> 12;
28471 lo = (value & 0x00000ffe) >> 1;
28472
28473 newval = md_chars_to_number (buf, THUMB_SIZE);
28474 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28475 newval |= (S << 10) | hi;
28476 newval2 |= (J1 << 13) | (J2 << 11) | lo;
28477 md_number_to_chars (buf, newval, THUMB_SIZE);
28478 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
28479 }
28480 break;
28481
28482 case BFD_RELOC_THUMB_PCREL_BLX:
28483 /* If there is a blx from a thumb state function to
28484 another thumb function flip this to a bl and warn
28485 about it. */
28486
28487 if (fixP->fx_addsy
28488 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28489 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28490 && THUMB_IS_FUNC (fixP->fx_addsy))
28491 {
28492 const char *name = S_GET_NAME (fixP->fx_addsy);
28493 as_warn_where (fixP->fx_file, fixP->fx_line,
28494 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
28495 name);
28496 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28497 newval = newval | 0x1000;
28498 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28499 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28500 fixP->fx_done = 1;
28501 }
28502
28503
28504 goto thumb_bl_common;
28505
28506 case BFD_RELOC_THUMB_PCREL_BRANCH23:
28507 /* A bl from Thumb state ISA to an internal ARM state function
28508 is converted to a blx. */
28509 if (fixP->fx_addsy
28510 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28511 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
28512 && ARM_IS_FUNC (fixP->fx_addsy)
28513 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
28514 {
28515 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28516 newval = newval & ~0x1000;
28517 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
28518 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
28519 fixP->fx_done = 1;
28520 }
28521
28522 thumb_bl_common:
28523
28524 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28525 /* For a BLX instruction, make sure that the relocation is rounded up
28526 to a word boundary. This follows the semantics of the instruction
28527 which specifies that bit 1 of the target address will come from bit
28528 1 of the base address. */
28529 value = (value + 3) & ~ 3;
28530
28531 #ifdef OBJ_ELF
28532 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
28533 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
28534 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
28535 #endif
28536
28537 if (out_of_range_p (value, 22))
28538 {
28539 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
28540 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28541 else if (out_of_range_p (value, 24))
28542 as_bad_where (fixP->fx_file, fixP->fx_line,
28543 _("Thumb2 branch out of range"));
28544 }
28545
28546 if (fixP->fx_done || !seg->use_rela_p)
28547 encode_thumb2_b_bl_offset (buf, value);
28548
28549 break;
28550
28551 case BFD_RELOC_THUMB_PCREL_BRANCH25:
28552 if (out_of_range_p (value, 24))
28553 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28554
28555 if (fixP->fx_done || !seg->use_rela_p)
28556 encode_thumb2_b_bl_offset (buf, value);
28557
28558 break;
28559
28560 case BFD_RELOC_8:
28561 if (fixP->fx_done || !seg->use_rela_p)
28562 *buf = value;
28563 break;
28564
28565 case BFD_RELOC_16:
28566 if (fixP->fx_done || !seg->use_rela_p)
28567 md_number_to_chars (buf, value, 2);
28568 break;
28569
28570 #ifdef OBJ_ELF
28571 case BFD_RELOC_ARM_TLS_CALL:
28572 case BFD_RELOC_ARM_THM_TLS_CALL:
28573 case BFD_RELOC_ARM_TLS_DESCSEQ:
28574 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
28575 case BFD_RELOC_ARM_TLS_GOTDESC:
28576 case BFD_RELOC_ARM_TLS_GD32:
28577 case BFD_RELOC_ARM_TLS_LE32:
28578 case BFD_RELOC_ARM_TLS_IE32:
28579 case BFD_RELOC_ARM_TLS_LDM32:
28580 case BFD_RELOC_ARM_TLS_LDO32:
28581 S_SET_THREAD_LOCAL (fixP->fx_addsy);
28582 break;
28583
28584 /* Same handling as above, but with the arm_fdpic guard. */
28585 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
28586 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
28587 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
28588 if (arm_fdpic)
28589 {
28590 S_SET_THREAD_LOCAL (fixP->fx_addsy);
28591 }
28592 else
28593 {
28594 as_bad_where (fixP->fx_file, fixP->fx_line,
28595 _("Relocation supported only in FDPIC mode"));
28596 }
28597 break;
28598
28599 case BFD_RELOC_ARM_GOT32:
28600 case BFD_RELOC_ARM_GOTOFF:
28601 break;
28602
28603 case BFD_RELOC_ARM_GOT_PREL:
28604 if (fixP->fx_done || !seg->use_rela_p)
28605 md_number_to_chars (buf, value, 4);
28606 break;
28607
28608 case BFD_RELOC_ARM_TARGET2:
28609 /* TARGET2 is not partial-inplace, so we need to write the
28610 addend here for REL targets, because it won't be written out
28611 during reloc processing later. */
28612 if (fixP->fx_done || !seg->use_rela_p)
28613 md_number_to_chars (buf, fixP->fx_offset, 4);
28614 break;
28615
28616 /* Relocations for FDPIC. */
28617 case BFD_RELOC_ARM_GOTFUNCDESC:
28618 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
28619 case BFD_RELOC_ARM_FUNCDESC:
28620 if (arm_fdpic)
28621 {
28622 if (fixP->fx_done || !seg->use_rela_p)
28623 md_number_to_chars (buf, 0, 4);
28624 }
28625 else
28626 {
28627 as_bad_where (fixP->fx_file, fixP->fx_line,
28628 _("Relocation supported only in FDPIC mode"));
28629 }
28630 break;
28631 #endif
28632
28633 case BFD_RELOC_RVA:
28634 case BFD_RELOC_32:
28635 case BFD_RELOC_ARM_TARGET1:
28636 case BFD_RELOC_ARM_ROSEGREL32:
28637 case BFD_RELOC_ARM_SBREL32:
28638 case BFD_RELOC_32_PCREL:
28639 #ifdef TE_PE
28640 case BFD_RELOC_32_SECREL:
28641 #endif
28642 if (fixP->fx_done || !seg->use_rela_p)
28643 #ifdef TE_WINCE
28644 /* For WinCE we only do this for pcrel fixups. */
28645 if (fixP->fx_done || fixP->fx_pcrel)
28646 #endif
28647 md_number_to_chars (buf, value, 4);
28648 break;
28649
28650 #ifdef OBJ_ELF
28651 case BFD_RELOC_ARM_PREL31:
28652 if (fixP->fx_done || !seg->use_rela_p)
28653 {
28654 newval = md_chars_to_number (buf, 4) & 0x80000000;
28655 if ((value ^ (value >> 1)) & 0x40000000)
28656 {
28657 as_bad_where (fixP->fx_file, fixP->fx_line,
28658 _("rel31 relocation overflow"));
28659 }
28660 newval |= value & 0x7fffffff;
28661 md_number_to_chars (buf, newval, 4);
28662 }
28663 break;
28664 #endif
28665
28666 case BFD_RELOC_ARM_CP_OFF_IMM:
28667 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
28668 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
28669 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
28670 newval = md_chars_to_number (buf, INSN_SIZE);
28671 else
28672 newval = get_thumb32_insn (buf);
28673 if ((newval & 0x0f200f00) == 0x0d000900)
28674 {
28675 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
28676 has permitted values that are multiples of 2, in the range 0
28677 to 510. */
28678 if (value < -510 || value > 510 || (value & 1))
28679 as_bad_where (fixP->fx_file, fixP->fx_line,
28680 _("co-processor offset out of range"));
28681 }
28682 else if ((newval & 0xfe001f80) == 0xec000f80)
28683 {
28684 if (value < -511 || value > 512 || (value & 3))
28685 as_bad_where (fixP->fx_file, fixP->fx_line,
28686 _("co-processor offset out of range"));
28687 }
28688 else if (value < -1023 || value > 1023 || (value & 3))
28689 as_bad_where (fixP->fx_file, fixP->fx_line,
28690 _("co-processor offset out of range"));
28691 cp_off_common:
28692 sign = value > 0;
28693 if (value < 0)
28694 value = -value;
28695 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28696 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28697 newval = md_chars_to_number (buf, INSN_SIZE);
28698 else
28699 newval = get_thumb32_insn (buf);
28700 if (value == 0)
28701 {
28702 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28703 newval &= 0xffffff80;
28704 else
28705 newval &= 0xffffff00;
28706 }
28707 else
28708 {
28709 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
28710 newval &= 0xff7fff80;
28711 else
28712 newval &= 0xff7fff00;
28713 if ((newval & 0x0f200f00) == 0x0d000900)
28714 {
28715 /* This is a fp16 vstr/vldr.
28716
28717 It requires the immediate offset in the instruction is shifted
28718 left by 1 to be a half-word offset.
28719
28720 Here, left shift by 1 first, and later right shift by 2
28721 should get the right offset. */
28722 value <<= 1;
28723 }
28724 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
28725 }
28726 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
28727 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
28728 md_number_to_chars (buf, newval, INSN_SIZE);
28729 else
28730 put_thumb32_insn (buf, newval);
28731 break;
28732
28733 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
28734 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
28735 if (value < -255 || value > 255)
28736 as_bad_where (fixP->fx_file, fixP->fx_line,
28737 _("co-processor offset out of range"));
28738 value *= 4;
28739 goto cp_off_common;
28740
28741 case BFD_RELOC_ARM_THUMB_OFFSET:
28742 newval = md_chars_to_number (buf, THUMB_SIZE);
28743 /* Exactly what ranges, and where the offset is inserted depends
28744 on the type of instruction, we can establish this from the
28745 top 4 bits. */
28746 switch (newval >> 12)
28747 {
28748 case 4: /* PC load. */
28749 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
28750 forced to zero for these loads; md_pcrel_from has already
28751 compensated for this. */
28752 if (value & 3)
28753 as_bad_where (fixP->fx_file, fixP->fx_line,
28754 _("invalid offset, target not word aligned (0x%08lX)"),
28755 (((unsigned long) fixP->fx_frag->fr_address
28756 + (unsigned long) fixP->fx_where) & ~3)
28757 + (unsigned long) value);
28758
28759 if (value & ~0x3fc)
28760 as_bad_where (fixP->fx_file, fixP->fx_line,
28761 _("invalid offset, value too big (0x%08lX)"),
28762 (long) value);
28763
28764 newval |= value >> 2;
28765 break;
28766
28767 case 9: /* SP load/store. */
28768 if (value & ~0x3fc)
28769 as_bad_where (fixP->fx_file, fixP->fx_line,
28770 _("invalid offset, value too big (0x%08lX)"),
28771 (long) value);
28772 newval |= value >> 2;
28773 break;
28774
28775 case 6: /* Word load/store. */
28776 if (value & ~0x7c)
28777 as_bad_where (fixP->fx_file, fixP->fx_line,
28778 _("invalid offset, value too big (0x%08lX)"),
28779 (long) value);
28780 newval |= value << 4; /* 6 - 2. */
28781 break;
28782
28783 case 7: /* Byte load/store. */
28784 if (value & ~0x1f)
28785 as_bad_where (fixP->fx_file, fixP->fx_line,
28786 _("invalid offset, value too big (0x%08lX)"),
28787 (long) value);
28788 newval |= value << 6;
28789 break;
28790
28791 case 8: /* Halfword load/store. */
28792 if (value & ~0x3e)
28793 as_bad_where (fixP->fx_file, fixP->fx_line,
28794 _("invalid offset, value too big (0x%08lX)"),
28795 (long) value);
28796 newval |= value << 5; /* 6 - 1. */
28797 break;
28798
28799 default:
28800 as_bad_where (fixP->fx_file, fixP->fx_line,
28801 "Unable to process relocation for thumb opcode: %lx",
28802 (unsigned long) newval);
28803 break;
28804 }
28805 md_number_to_chars (buf, newval, THUMB_SIZE);
28806 break;
28807
28808 case BFD_RELOC_ARM_THUMB_ADD:
28809 /* This is a complicated relocation, since we use it for all of
28810 the following immediate relocations:
28811
28812 3bit ADD/SUB
28813 8bit ADD/SUB
28814 9bit ADD/SUB SP word-aligned
28815 10bit ADD PC/SP word-aligned
28816
28817 The type of instruction being processed is encoded in the
28818 instruction field:
28819
28820 0x8000 SUB
28821 0x00F0 Rd
28822 0x000F Rs
28823 */
28824 newval = md_chars_to_number (buf, THUMB_SIZE);
28825 {
28826 int rd = (newval >> 4) & 0xf;
28827 int rs = newval & 0xf;
28828 int subtract = !!(newval & 0x8000);
28829
28830 /* Check for HI regs, only very restricted cases allowed:
28831 Adjusting SP, and using PC or SP to get an address. */
28832 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
28833 || (rs > 7 && rs != REG_SP && rs != REG_PC))
28834 as_bad_where (fixP->fx_file, fixP->fx_line,
28835 _("invalid Hi register with immediate"));
28836
28837 /* If value is negative, choose the opposite instruction. */
28838 if (value < 0)
28839 {
28840 value = -value;
28841 subtract = !subtract;
28842 if (value < 0)
28843 as_bad_where (fixP->fx_file, fixP->fx_line,
28844 _("immediate value out of range"));
28845 }
28846
28847 if (rd == REG_SP)
28848 {
28849 if (value & ~0x1fc)
28850 as_bad_where (fixP->fx_file, fixP->fx_line,
28851 _("invalid immediate for stack address calculation"));
28852 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
28853 newval |= value >> 2;
28854 }
28855 else if (rs == REG_PC || rs == REG_SP)
28856 {
28857 /* PR gas/18541. If the addition is for a defined symbol
28858 within range of an ADR instruction then accept it. */
28859 if (subtract
28860 && value == 4
28861 && fixP->fx_addsy != NULL)
28862 {
28863 subtract = 0;
28864
28865 if (! S_IS_DEFINED (fixP->fx_addsy)
28866 || S_GET_SEGMENT (fixP->fx_addsy) != seg
28867 || S_IS_WEAK (fixP->fx_addsy))
28868 {
28869 as_bad_where (fixP->fx_file, fixP->fx_line,
28870 _("address calculation needs a strongly defined nearby symbol"));
28871 }
28872 else
28873 {
28874 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
28875
28876 /* Round up to the next 4-byte boundary. */
28877 if (v & 3)
28878 v = (v + 3) & ~ 3;
28879 else
28880 v += 4;
28881 v = S_GET_VALUE (fixP->fx_addsy) - v;
28882
28883 if (v & ~0x3fc)
28884 {
28885 as_bad_where (fixP->fx_file, fixP->fx_line,
28886 _("symbol too far away"));
28887 }
28888 else
28889 {
28890 fixP->fx_done = 1;
28891 value = v;
28892 }
28893 }
28894 }
28895
28896 if (subtract || value & ~0x3fc)
28897 as_bad_where (fixP->fx_file, fixP->fx_line,
28898 _("invalid immediate for address calculation (value = 0x%08lX)"),
28899 (unsigned long) (subtract ? - value : value));
28900 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
28901 newval |= rd << 8;
28902 newval |= value >> 2;
28903 }
28904 else if (rs == rd)
28905 {
28906 if (value & ~0xff)
28907 as_bad_where (fixP->fx_file, fixP->fx_line,
28908 _("immediate value out of range"));
28909 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
28910 newval |= (rd << 8) | value;
28911 }
28912 else
28913 {
28914 if (value & ~0x7)
28915 as_bad_where (fixP->fx_file, fixP->fx_line,
28916 _("immediate value out of range"));
28917 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
28918 newval |= rd | (rs << 3) | (value << 6);
28919 }
28920 }
28921 md_number_to_chars (buf, newval, THUMB_SIZE);
28922 break;
28923
28924 case BFD_RELOC_ARM_THUMB_IMM:
28925 newval = md_chars_to_number (buf, THUMB_SIZE);
28926 if (value < 0 || value > 255)
28927 as_bad_where (fixP->fx_file, fixP->fx_line,
28928 _("invalid immediate: %ld is out of range"),
28929 (long) value);
28930 newval |= value;
28931 md_number_to_chars (buf, newval, THUMB_SIZE);
28932 break;
28933
28934 case BFD_RELOC_ARM_THUMB_SHIFT:
28935 /* 5bit shift value (0..32). LSL cannot take 32. */
28936 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
28937 temp = newval & 0xf800;
28938 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
28939 as_bad_where (fixP->fx_file, fixP->fx_line,
28940 _("invalid shift value: %ld"), (long) value);
28941 /* Shifts of zero must be encoded as LSL. */
28942 if (value == 0)
28943 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
28944 /* Shifts of 32 are encoded as zero. */
28945 else if (value == 32)
28946 value = 0;
28947 newval |= value << 6;
28948 md_number_to_chars (buf, newval, THUMB_SIZE);
28949 break;
28950
28951 case BFD_RELOC_VTABLE_INHERIT:
28952 case BFD_RELOC_VTABLE_ENTRY:
28953 fixP->fx_done = 0;
28954 return;
28955
28956 case BFD_RELOC_ARM_MOVW:
28957 case BFD_RELOC_ARM_MOVT:
28958 case BFD_RELOC_ARM_THUMB_MOVW:
28959 case BFD_RELOC_ARM_THUMB_MOVT:
28960 if (fixP->fx_done || !seg->use_rela_p)
28961 {
28962 /* REL format relocations are limited to a 16-bit addend. */
28963 if (!fixP->fx_done)
28964 {
28965 if (value < -0x8000 || value > 0x7fff)
28966 as_bad_where (fixP->fx_file, fixP->fx_line,
28967 _("offset out of range"));
28968 }
28969 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
28970 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28971 {
28972 value >>= 16;
28973 }
28974
28975 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
28976 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
28977 {
28978 newval = get_thumb32_insn (buf);
28979 newval &= 0xfbf08f00;
28980 newval |= (value & 0xf000) << 4;
28981 newval |= (value & 0x0800) << 15;
28982 newval |= (value & 0x0700) << 4;
28983 newval |= (value & 0x00ff);
28984 put_thumb32_insn (buf, newval);
28985 }
28986 else
28987 {
28988 newval = md_chars_to_number (buf, 4);
28989 newval &= 0xfff0f000;
28990 newval |= value & 0x0fff;
28991 newval |= (value & 0xf000) << 4;
28992 md_number_to_chars (buf, newval, 4);
28993 }
28994 }
28995 return;
28996
28997 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
28998 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
28999 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29000 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29001 gas_assert (!fixP->fx_done);
29002 {
29003 bfd_vma insn;
29004 bfd_boolean is_mov;
29005 bfd_vma encoded_addend = value;
29006
29007 /* Check that addend can be encoded in instruction. */
29008 if (!seg->use_rela_p && (value < 0 || value > 255))
29009 as_bad_where (fixP->fx_file, fixP->fx_line,
29010 _("the offset 0x%08lX is not representable"),
29011 (unsigned long) encoded_addend);
29012
29013 /* Extract the instruction. */
29014 insn = md_chars_to_number (buf, THUMB_SIZE);
29015 is_mov = (insn & 0xf800) == 0x2000;
29016
29017 /* Encode insn. */
29018 if (is_mov)
29019 {
29020 if (!seg->use_rela_p)
29021 insn |= encoded_addend;
29022 }
29023 else
29024 {
29025 int rd, rs;
29026
29027 /* Extract the instruction. */
29028 /* Encoding is the following
29029 0x8000 SUB
29030 0x00F0 Rd
29031 0x000F Rs
29032 */
29033 /* The following conditions must be true :
29034 - ADD
29035 - Rd == Rs
29036 - Rd <= 7
29037 */
29038 rd = (insn >> 4) & 0xf;
29039 rs = insn & 0xf;
29040 if ((insn & 0x8000) || (rd != rs) || rd > 7)
29041 as_bad_where (fixP->fx_file, fixP->fx_line,
29042 _("Unable to process relocation for thumb opcode: %lx"),
29043 (unsigned long) insn);
29044
29045 /* Encode as ADD immediate8 thumb 1 code. */
29046 insn = 0x3000 | (rd << 8);
29047
29048 /* Place the encoded addend into the first 8 bits of the
29049 instruction. */
29050 if (!seg->use_rela_p)
29051 insn |= encoded_addend;
29052 }
29053
29054 /* Update the instruction. */
29055 md_number_to_chars (buf, insn, THUMB_SIZE);
29056 }
29057 break;
29058
29059 case BFD_RELOC_ARM_ALU_PC_G0_NC:
29060 case BFD_RELOC_ARM_ALU_PC_G0:
29061 case BFD_RELOC_ARM_ALU_PC_G1_NC:
29062 case BFD_RELOC_ARM_ALU_PC_G1:
29063 case BFD_RELOC_ARM_ALU_PC_G2:
29064 case BFD_RELOC_ARM_ALU_SB_G0_NC:
29065 case BFD_RELOC_ARM_ALU_SB_G0:
29066 case BFD_RELOC_ARM_ALU_SB_G1_NC:
29067 case BFD_RELOC_ARM_ALU_SB_G1:
29068 case BFD_RELOC_ARM_ALU_SB_G2:
29069 gas_assert (!fixP->fx_done);
29070 if (!seg->use_rela_p)
29071 {
29072 bfd_vma insn;
29073 bfd_vma encoded_addend;
29074 bfd_vma addend_abs = llabs (value);
29075
29076 /* Check that the absolute value of the addend can be
29077 expressed as an 8-bit constant plus a rotation. */
29078 encoded_addend = encode_arm_immediate (addend_abs);
29079 if (encoded_addend == (unsigned int) FAIL)
29080 as_bad_where (fixP->fx_file, fixP->fx_line,
29081 _("the offset 0x%08lX is not representable"),
29082 (unsigned long) addend_abs);
29083
29084 /* Extract the instruction. */
29085 insn = md_chars_to_number (buf, INSN_SIZE);
29086
29087 /* If the addend is positive, use an ADD instruction.
29088 Otherwise use a SUB. Take care not to destroy the S bit. */
29089 insn &= 0xff1fffff;
29090 if (value < 0)
29091 insn |= 1 << 22;
29092 else
29093 insn |= 1 << 23;
29094
29095 /* Place the encoded addend into the first 12 bits of the
29096 instruction. */
29097 insn &= 0xfffff000;
29098 insn |= encoded_addend;
29099
29100 /* Update the instruction. */
29101 md_number_to_chars (buf, insn, INSN_SIZE);
29102 }
29103 break;
29104
29105 case BFD_RELOC_ARM_LDR_PC_G0:
29106 case BFD_RELOC_ARM_LDR_PC_G1:
29107 case BFD_RELOC_ARM_LDR_PC_G2:
29108 case BFD_RELOC_ARM_LDR_SB_G0:
29109 case BFD_RELOC_ARM_LDR_SB_G1:
29110 case BFD_RELOC_ARM_LDR_SB_G2:
29111 gas_assert (!fixP->fx_done);
29112 if (!seg->use_rela_p)
29113 {
29114 bfd_vma insn;
29115 bfd_vma addend_abs = llabs (value);
29116
29117 /* Check that the absolute value of the addend can be
29118 encoded in 12 bits. */
29119 if (addend_abs >= 0x1000)
29120 as_bad_where (fixP->fx_file, fixP->fx_line,
29121 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29122 (unsigned long) addend_abs);
29123
29124 /* Extract the instruction. */
29125 insn = md_chars_to_number (buf, INSN_SIZE);
29126
29127 /* If the addend is negative, clear bit 23 of the instruction.
29128 Otherwise set it. */
29129 if (value < 0)
29130 insn &= ~(1 << 23);
29131 else
29132 insn |= 1 << 23;
29133
29134 /* Place the absolute value of the addend into the first 12 bits
29135 of the instruction. */
29136 insn &= 0xfffff000;
29137 insn |= addend_abs;
29138
29139 /* Update the instruction. */
29140 md_number_to_chars (buf, insn, INSN_SIZE);
29141 }
29142 break;
29143
29144 case BFD_RELOC_ARM_LDRS_PC_G0:
29145 case BFD_RELOC_ARM_LDRS_PC_G1:
29146 case BFD_RELOC_ARM_LDRS_PC_G2:
29147 case BFD_RELOC_ARM_LDRS_SB_G0:
29148 case BFD_RELOC_ARM_LDRS_SB_G1:
29149 case BFD_RELOC_ARM_LDRS_SB_G2:
29150 gas_assert (!fixP->fx_done);
29151 if (!seg->use_rela_p)
29152 {
29153 bfd_vma insn;
29154 bfd_vma addend_abs = llabs (value);
29155
29156 /* Check that the absolute value of the addend can be
29157 encoded in 8 bits. */
29158 if (addend_abs >= 0x100)
29159 as_bad_where (fixP->fx_file, fixP->fx_line,
29160 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29161 (unsigned long) addend_abs);
29162
29163 /* Extract the instruction. */
29164 insn = md_chars_to_number (buf, INSN_SIZE);
29165
29166 /* If the addend is negative, clear bit 23 of the instruction.
29167 Otherwise set it. */
29168 if (value < 0)
29169 insn &= ~(1 << 23);
29170 else
29171 insn |= 1 << 23;
29172
29173 /* Place the first four bits of the absolute value of the addend
29174 into the first 4 bits of the instruction, and the remaining
29175 four into bits 8 .. 11. */
29176 insn &= 0xfffff0f0;
29177 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29178
29179 /* Update the instruction. */
29180 md_number_to_chars (buf, insn, INSN_SIZE);
29181 }
29182 break;
29183
29184 case BFD_RELOC_ARM_LDC_PC_G0:
29185 case BFD_RELOC_ARM_LDC_PC_G1:
29186 case BFD_RELOC_ARM_LDC_PC_G2:
29187 case BFD_RELOC_ARM_LDC_SB_G0:
29188 case BFD_RELOC_ARM_LDC_SB_G1:
29189 case BFD_RELOC_ARM_LDC_SB_G2:
29190 gas_assert (!fixP->fx_done);
29191 if (!seg->use_rela_p)
29192 {
29193 bfd_vma insn;
29194 bfd_vma addend_abs = llabs (value);
29195
29196 /* Check that the absolute value of the addend is a multiple of
29197 four and, when divided by four, fits in 8 bits. */
29198 if (addend_abs & 0x3)
29199 as_bad_where (fixP->fx_file, fixP->fx_line,
29200 _("bad offset 0x%08lX (must be word-aligned)"),
29201 (unsigned long) addend_abs);
29202
29203 if ((addend_abs >> 2) > 0xff)
29204 as_bad_where (fixP->fx_file, fixP->fx_line,
29205 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29206 (unsigned long) addend_abs);
29207
29208 /* Extract the instruction. */
29209 insn = md_chars_to_number (buf, INSN_SIZE);
29210
29211 /* If the addend is negative, clear bit 23 of the instruction.
29212 Otherwise set it. */
29213 if (value < 0)
29214 insn &= ~(1 << 23);
29215 else
29216 insn |= 1 << 23;
29217
29218 /* Place the addend (divided by four) into the first eight
29219 bits of the instruction. */
29220 insn &= 0xfffffff0;
29221 insn |= addend_abs >> 2;
29222
29223 /* Update the instruction. */
29224 md_number_to_chars (buf, insn, INSN_SIZE);
29225 }
29226 break;
29227
29228 case BFD_RELOC_THUMB_PCREL_BRANCH5:
29229 if (fixP->fx_addsy
29230 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29231 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29232 && ARM_IS_FUNC (fixP->fx_addsy)
29233 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29234 {
29235 /* Force a relocation for a branch 5 bits wide. */
29236 fixP->fx_done = 0;
29237 }
29238 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
29239 as_bad_where (fixP->fx_file, fixP->fx_line,
29240 BAD_BRANCH_OFF);
29241
29242 if (fixP->fx_done || !seg->use_rela_p)
29243 {
29244 addressT boff = value >> 1;
29245
29246 newval = md_chars_to_number (buf, THUMB_SIZE);
29247 newval |= (boff << 7);
29248 md_number_to_chars (buf, newval, THUMB_SIZE);
29249 }
29250 break;
29251
29252 case BFD_RELOC_THUMB_PCREL_BFCSEL:
29253 if (fixP->fx_addsy
29254 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29255 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29256 && ARM_IS_FUNC (fixP->fx_addsy)
29257 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29258 {
29259 fixP->fx_done = 0;
29260 }
29261 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
29262 as_bad_where (fixP->fx_file, fixP->fx_line,
29263 _("branch out of range"));
29264
29265 if (fixP->fx_done || !seg->use_rela_p)
29266 {
29267 newval = md_chars_to_number (buf, THUMB_SIZE);
29268
29269 addressT boff = ((newval & 0x0780) >> 7) << 1;
29270 addressT diff = value - boff;
29271
29272 if (diff == 4)
29273 {
29274 newval |= 1 << 1; /* T bit. */
29275 }
29276 else if (diff != 2)
29277 {
29278 as_bad_where (fixP->fx_file, fixP->fx_line,
29279 _("out of range label-relative fixup value"));
29280 }
29281 md_number_to_chars (buf, newval, THUMB_SIZE);
29282 }
29283 break;
29284
29285 case BFD_RELOC_ARM_THUMB_BF17:
29286 if (fixP->fx_addsy
29287 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29288 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29289 && ARM_IS_FUNC (fixP->fx_addsy)
29290 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29291 {
29292 /* Force a relocation for a branch 17 bits wide. */
29293 fixP->fx_done = 0;
29294 }
29295
29296 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
29297 as_bad_where (fixP->fx_file, fixP->fx_line,
29298 BAD_BRANCH_OFF);
29299
29300 if (fixP->fx_done || !seg->use_rela_p)
29301 {
29302 offsetT newval2;
29303 addressT immA, immB, immC;
29304
29305 immA = (value & 0x0001f000) >> 12;
29306 immB = (value & 0x00000ffc) >> 2;
29307 immC = (value & 0x00000002) >> 1;
29308
29309 newval = md_chars_to_number (buf, THUMB_SIZE);
29310 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29311 newval |= immA;
29312 newval2 |= (immC << 11) | (immB << 1);
29313 md_number_to_chars (buf, newval, THUMB_SIZE);
29314 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29315 }
29316 break;
29317
29318 case BFD_RELOC_ARM_THUMB_BF19:
29319 if (fixP->fx_addsy
29320 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29321 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29322 && ARM_IS_FUNC (fixP->fx_addsy)
29323 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29324 {
29325 /* Force a relocation for a branch 19 bits wide. */
29326 fixP->fx_done = 0;
29327 }
29328
29329 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
29330 as_bad_where (fixP->fx_file, fixP->fx_line,
29331 BAD_BRANCH_OFF);
29332
29333 if (fixP->fx_done || !seg->use_rela_p)
29334 {
29335 offsetT newval2;
29336 addressT immA, immB, immC;
29337
29338 immA = (value & 0x0007f000) >> 12;
29339 immB = (value & 0x00000ffc) >> 2;
29340 immC = (value & 0x00000002) >> 1;
29341
29342 newval = md_chars_to_number (buf, THUMB_SIZE);
29343 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29344 newval |= immA;
29345 newval2 |= (immC << 11) | (immB << 1);
29346 md_number_to_chars (buf, newval, THUMB_SIZE);
29347 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29348 }
29349 break;
29350
29351 case BFD_RELOC_ARM_THUMB_BF13:
29352 if (fixP->fx_addsy
29353 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29354 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29355 && ARM_IS_FUNC (fixP->fx_addsy)
29356 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29357 {
29358 /* Force a relocation for a branch 13 bits wide. */
29359 fixP->fx_done = 0;
29360 }
29361
29362 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
29363 as_bad_where (fixP->fx_file, fixP->fx_line,
29364 BAD_BRANCH_OFF);
29365
29366 if (fixP->fx_done || !seg->use_rela_p)
29367 {
29368 offsetT newval2;
29369 addressT immA, immB, immC;
29370
29371 immA = (value & 0x00001000) >> 12;
29372 immB = (value & 0x00000ffc) >> 2;
29373 immC = (value & 0x00000002) >> 1;
29374
29375 newval = md_chars_to_number (buf, THUMB_SIZE);
29376 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29377 newval |= immA;
29378 newval2 |= (immC << 11) | (immB << 1);
29379 md_number_to_chars (buf, newval, THUMB_SIZE);
29380 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29381 }
29382 break;
29383
29384 case BFD_RELOC_ARM_THUMB_LOOP12:
29385 if (fixP->fx_addsy
29386 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29387 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29388 && ARM_IS_FUNC (fixP->fx_addsy)
29389 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29390 {
29391 /* Force a relocation for a branch 12 bits wide. */
29392 fixP->fx_done = 0;
29393 }
29394
29395 bfd_vma insn = get_thumb32_insn (buf);
29396 /* le lr, <label>, le <label> or letp lr, <label> */
29397 if (((insn & 0xffffffff) == 0xf00fc001)
29398 || ((insn & 0xffffffff) == 0xf02fc001)
29399 || ((insn & 0xffffffff) == 0xf01fc001))
29400 value = -value;
29401
29402 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
29403 as_bad_where (fixP->fx_file, fixP->fx_line,
29404 BAD_BRANCH_OFF);
29405 if (fixP->fx_done || !seg->use_rela_p)
29406 {
29407 addressT imml, immh;
29408
29409 immh = (value & 0x00000ffc) >> 2;
29410 imml = (value & 0x00000002) >> 1;
29411
29412 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29413 newval |= (imml << 11) | (immh << 1);
29414 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29415 }
29416 break;
29417
29418 case BFD_RELOC_ARM_V4BX:
29419 /* This will need to go in the object file. */
29420 fixP->fx_done = 0;
29421 break;
29422
29423 case BFD_RELOC_UNUSED:
29424 default:
29425 as_bad_where (fixP->fx_file, fixP->fx_line,
29426 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
29427 }
29428 }
29429
29430 /* Translate internal representation of relocation info to BFD target
29431 format. */
29432
/* Translate the internal representation of a fixup (FIXP, located in
   SECTION) into a BFD arelent suitable for the output object file.
   Returns the new arelent, or NULL after issuing a diagnostic for
   fixups that should have been resolved internally or cannot be
   represented in this object format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend convention differs between
     RELA (explicit addend) and REL (addend stored in the section).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type to the BFD reloc CODE.  The chain of
     fall-throughs below converts the data relocs (and MOVW/MOVT) to
     their PC-relative variants when fx_pcrel is set.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These pass through to the object file unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later folds Thumb BLX relocs into BRANCH23.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    /* ELF-only relocations that pass through unchanged.  */
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    /* The remaining cases are internal fixups that should all have
       been resolved by md_apply_fix; reaching here is an error.  */
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* RELA targets can emit this directly; REL targets cannot.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Produce a readable name for the unrepresentable fixup.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	　break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8"; break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM"; break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC"; break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI"; break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI"; break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD"; break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT"; break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* Data relocations against _GLOBAL_OFFSET_TABLE_ become GOTPC.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
29716
29717 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
29718
29719 void
29720 cons_fix_new_arm (fragS * frag,
29721 int where,
29722 int size,
29723 expressionS * exp,
29724 bfd_reloc_code_real_type reloc)
29725 {
29726 int pcrel = 0;
29727
29728 /* Pick a reloc.
29729 FIXME: @@ Should look at CPU word size. */
29730 switch (size)
29731 {
29732 case 1:
29733 reloc = BFD_RELOC_8;
29734 break;
29735 case 2:
29736 reloc = BFD_RELOC_16;
29737 break;
29738 case 4:
29739 default:
29740 reloc = BFD_RELOC_32;
29741 break;
29742 case 8:
29743 reloc = BFD_RELOC_64;
29744 break;
29745 }
29746
29747 #ifdef TE_PE
29748 if (exp->X_op == O_secrel)
29749 {
29750 exp->X_op = O_symbol;
29751 reloc = BFD_RELOC_32_SECREL;
29752 }
29753 #endif
29754
29755 fix_new_exp (frag, where, size, exp, pcrel, reloc);
29756 }
29757
29758 #if defined (OBJ_COFF)
29759 void
29760 arm_validate_fix (fixS * fixP)
29761 {
29762 /* If the destination of the branch is a defined symbol which does not have
29763 the THUMB_FUNC attribute, then we must be calling a function which has
29764 the (interfacearm) attribute. We look for the Thumb entry point to that
29765 function and change the branch to refer to that function instead. */
29766 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
29767 && fixP->fx_addsy != NULL
29768 && S_IS_DEFINED (fixP->fx_addsy)
29769 && ! THUMB_IS_FUNC (fixP->fx_addsy))
29770 {
29771 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
29772 }
29773 }
29774 #endif
29775
29776
29777 int
29778 arm_force_relocation (struct fix * fixp)
29779 {
29780 #if defined (OBJ_COFF) && defined (TE_PE)
29781 if (fixp->fx_r_type == BFD_RELOC_RVA)
29782 return 1;
29783 #endif
29784
29785 /* In case we have a call or a branch to a function in ARM ISA mode from
29786 a thumb function or vice-versa force the relocation. These relocations
29787 are cleared off for some cores that might have blx and simple transformations
29788 are possible. */
29789
29790 #ifdef OBJ_ELF
29791 switch (fixp->fx_r_type)
29792 {
29793 case BFD_RELOC_ARM_PCREL_JUMP:
29794 case BFD_RELOC_ARM_PCREL_CALL:
29795 case BFD_RELOC_THUMB_PCREL_BLX:
29796 if (THUMB_IS_FUNC (fixp->fx_addsy))
29797 return 1;
29798 break;
29799
29800 case BFD_RELOC_ARM_PCREL_BLX:
29801 case BFD_RELOC_THUMB_PCREL_BRANCH25:
29802 case BFD_RELOC_THUMB_PCREL_BRANCH20:
29803 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29804 if (ARM_IS_FUNC (fixp->fx_addsy))
29805 return 1;
29806 break;
29807
29808 default:
29809 break;
29810 }
29811 #endif
29812
29813 /* Resolve these relocations even if the symbol is extern or weak.
29814 Technically this is probably wrong due to symbol preemption.
29815 In practice these relocations do not have enough range to be useful
29816 at dynamic link time, and some code (e.g. in the Linux kernel)
29817 expects these references to be resolved. */
29818 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
29819 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
29820 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
29821 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
29822 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29823 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
29824 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
29825 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
29826 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
29827 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
29828 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
29829 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
29830 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
29831 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
29832 return 0;
29833
29834 /* Always leave these relocations for the linker. */
29835 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29836 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29837 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29838 return 1;
29839
29840 /* Always generate relocations against function symbols. */
29841 if (fixp->fx_r_type == BFD_RELOC_32
29842 && fixp->fx_addsy
29843 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
29844 return 1;
29845
29846 return generic_force_reloc (fixp);
29847 }
29848
29849 #if defined (OBJ_ELF) || defined (OBJ_COFF)
29850 /* Relocations against function names must be left unadjusted,
29851 so that the linker can use this information to generate interworking
29852 stubs. The MIPS version of this function
29853 also prevents relocations that are mips-16 specific, but I do not
29854 know why it does this.
29855
29856 FIXME:
29857 There is one other problem that ought to be addressed here, but
29858 which currently is not: Taking the address of a label (rather
29859 than a function) and then later jumping to that address. Such
29860 addresses also ought to have their bottom bit set (assuming that
29861 they reside in Thumb code), but at the moment they will not. */
29862
29863 bfd_boolean
29864 arm_fix_adjustable (fixS * fixP)
29865 {
29866 if (fixP->fx_addsy == NULL)
29867 return 1;
29868
29869 /* Preserve relocations against symbols with function type. */
29870 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
29871 return FALSE;
29872
29873 if (THUMB_IS_FUNC (fixP->fx_addsy)
29874 && fixP->fx_subsy == NULL)
29875 return FALSE;
29876
29877 /* We need the symbol name for the VTABLE entries. */
29878 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
29879 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
29880 return FALSE;
29881
29882 /* Don't allow symbols to be discarded on GOT related relocs. */
29883 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
29884 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
29885 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
29886 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
29887 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
29888 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
29889 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
29890 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
29891 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
29892 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
29893 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
29894 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
29895 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
29896 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
29897 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
29898 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
29899 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
29900 return FALSE;
29901
29902 /* Similarly for group relocations. */
29903 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
29904 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
29905 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
29906 return FALSE;
29907
29908 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
29909 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
29910 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29911 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
29912 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
29913 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29914 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
29915 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
29916 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
29917 return FALSE;
29918
29919 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
29920 offsets, so keep these symbols. */
29921 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
29922 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
29923 return FALSE;
29924
29925 return TRUE;
29926 }
29927 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
29928
29929 #ifdef OBJ_ELF
29930 const char *
29931 elf32_arm_target_format (void)
29932 {
29933 #ifdef TE_SYMBIAN
29934 return (target_big_endian
29935 ? "elf32-bigarm-symbian"
29936 : "elf32-littlearm-symbian");
29937 #elif defined (TE_VXWORKS)
29938 return (target_big_endian
29939 ? "elf32-bigarm-vxworks"
29940 : "elf32-littlearm-vxworks");
29941 #elif defined (TE_NACL)
29942 return (target_big_endian
29943 ? "elf32-bigarm-nacl"
29944 : "elf32-littlearm-nacl");
29945 #else
29946 if (arm_fdpic)
29947 {
29948 if (target_big_endian)
29949 return "elf32-bigarm-fdpic";
29950 else
29951 return "elf32-littlearm-fdpic";
29952 }
29953 else
29954 {
29955 if (target_big_endian)
29956 return "elf32-bigarm";
29957 else
29958 return "elf32-littlearm";
29959 }
29960 #endif
29961 }
29962
/* Per-symbol hook for ELF output: simply forward SYMP and PUNTP to
   the generic ELF symbol frobbing routine.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
29969 #endif
29970
29971 /* MD interface: Finalization. */
29972
/* End-of-assembly cleanup: verify all predication blocks were closed
   and flush every outstanding literal pool to its home section.  */
void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Ensure that all the predication blocks are properly closed.  */
  check_pred_blocks_finished ();

  /* Walk every literal pool that was created during assembly.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      /* Emit the pool's contents, as the .ltorg directive would.  */
      s_ltorg (0);
    }
}
29991
29992 #ifdef OBJ_ELF
29993 /* Remove any excess mapping symbols generated for alignment frags in
29994 SEC. We may have created a mapping symbol before a zero byte
29995 alignment; remove it if there's a mapping symbol after the
29996 alignment. */
/* bfd_map_over_sections callback: drop redundant mapping symbols that
   sit exactly at the end of a frag in SEC when an equivalent mapping
   symbol (or the end of the section) follows immediately.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* The last mapping symbol recorded for this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through (possibly empty) following frags to see
	 whether SYM is made redundant by a later mapping symbol.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30057 #endif
30058
30059 /* Adjust the symbol table. This marks Thumb symbols as distinct from
30060 ARM ones. */
30061
/* Adjust the symbol table: mark Thumb symbols so they are
   distinguishable from ARM ones in the output file.  For COFF this
   rewrites storage classes; for ELF it tags branch type / st_info and
   prunes redundant mapping symbols.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the COFF symbol entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30140
30141 /* MD interface: Initialization. */
30142
30143 static void
30144 set_constant_flonums (void)
30145 {
30146 int i;
30147
30148 for (i = 0; i < NUM_FLOAT_VALS; i++)
30149 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30150 abort ();
30151 }
30152
30153 /* Auto-select Thumb mode if it's the only available instruction set for the
30154 given architecture. */
30155
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* A core lacking the base ARM instruction set (arm_ext_v1) can only
     execute Thumb code, so switch to 16-bit opcode selection.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
30162
30163 void
30164 md_begin (void)
30165 {
30166 unsigned mach;
30167 unsigned int i;
30168
30169 if ( (arm_ops_hsh = hash_new ()) == NULL
30170 || (arm_cond_hsh = hash_new ()) == NULL
30171 || (arm_vcond_hsh = hash_new ()) == NULL
30172 || (arm_shift_hsh = hash_new ()) == NULL
30173 || (arm_psr_hsh = hash_new ()) == NULL
30174 || (arm_v7m_psr_hsh = hash_new ()) == NULL
30175 || (arm_reg_hsh = hash_new ()) == NULL
30176 || (arm_reloc_hsh = hash_new ()) == NULL
30177 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
30178 as_fatal (_("virtual memory exhausted"));
30179
30180 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
30181 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
30182 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
30183 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
30184 for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
30185 hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
30186 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
30187 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
30188 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
30189 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
30190 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
30191 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
30192 (void *) (v7m_psrs + i));
30193 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
30194 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
30195 for (i = 0;
30196 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
30197 i++)
30198 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
30199 (void *) (barrier_opt_names + i));
30200 #ifdef OBJ_ELF
30201 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
30202 {
30203 struct reloc_entry * entry = reloc_names + i;
30204
30205 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
30206 /* This makes encode_branch() use the EABI versions of this relocation. */
30207 entry->reloc = BFD_RELOC_UNUSED;
30208
30209 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
30210 }
30211 #endif
30212
30213 set_constant_flonums ();
30214
30215 /* Set the cpu variant based on the command-line options. We prefer
30216 -mcpu= over -march= if both are set (as for GCC); and we prefer
30217 -mfpu= over any other way of setting the floating point unit.
30218 Use of legacy options with new options are faulted. */
30219 if (legacy_cpu)
30220 {
30221 if (mcpu_cpu_opt || march_cpu_opt)
30222 as_bad (_("use of old and new-style options to set CPU type"));
30223
30224 selected_arch = *legacy_cpu;
30225 }
30226 else if (mcpu_cpu_opt)
30227 {
30228 selected_arch = *mcpu_cpu_opt;
30229 selected_ext = *mcpu_ext_opt;
30230 }
30231 else if (march_cpu_opt)
30232 {
30233 selected_arch = *march_cpu_opt;
30234 selected_ext = *march_ext_opt;
30235 }
30236 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
30237
30238 if (legacy_fpu)
30239 {
30240 if (mfpu_opt)
30241 as_bad (_("use of old and new-style options to set FPU type"));
30242
30243 selected_fpu = *legacy_fpu;
30244 }
30245 else if (mfpu_opt)
30246 selected_fpu = *mfpu_opt;
30247 else
30248 {
30249 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
30250 || defined (TE_NetBSD) || defined (TE_VXWORKS))
30251 /* Some environments specify a default FPU. If they don't, infer it
30252 from the processor. */
30253 if (mcpu_fpu_opt)
30254 selected_fpu = *mcpu_fpu_opt;
30255 else if (march_fpu_opt)
30256 selected_fpu = *march_fpu_opt;
30257 #else
30258 selected_fpu = fpu_default;
30259 #endif
30260 }
30261
30262 if (ARM_FEATURE_ZERO (selected_fpu))
30263 {
30264 if (!no_cpu_selected ())
30265 selected_fpu = fpu_default;
30266 else
30267 selected_fpu = fpu_arch_fpa;
30268 }
30269
30270 #ifdef CPU_DEFAULT
30271 if (ARM_FEATURE_ZERO (selected_arch))
30272 {
30273 selected_arch = cpu_default;
30274 selected_cpu = selected_arch;
30275 }
30276 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
30277 #else
30278 /* Autodection of feature mode: allow all features in cpu_variant but leave
30279 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
30280 after all instruction have been processed and we can decide what CPU
30281 should be selected. */
30282 if (ARM_FEATURE_ZERO (selected_arch))
30283 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
30284 else
30285 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
30286 #endif
30287
30288 autoselect_thumb_from_cpu_variant ();
30289
30290 arm_arch_used = thumb_arch_used = arm_arch_none;
30291
30292 #if defined OBJ_COFF || defined OBJ_ELF
30293 {
30294 unsigned int flags = 0;
30295
30296 #if defined OBJ_ELF
30297 flags = meabi_flags;
30298
30299 switch (meabi_flags)
30300 {
30301 case EF_ARM_EABI_UNKNOWN:
30302 #endif
30303 /* Set the flags in the private structure. */
30304 if (uses_apcs_26) flags |= F_APCS26;
30305 if (support_interwork) flags |= F_INTERWORK;
30306 if (uses_apcs_float) flags |= F_APCS_FLOAT;
30307 if (pic_code) flags |= F_PIC;
30308 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
30309 flags |= F_SOFT_FLOAT;
30310
30311 switch (mfloat_abi_opt)
30312 {
30313 case ARM_FLOAT_ABI_SOFT:
30314 case ARM_FLOAT_ABI_SOFTFP:
30315 flags |= F_SOFT_FLOAT;
30316 break;
30317
30318 case ARM_FLOAT_ABI_HARD:
30319 if (flags & F_SOFT_FLOAT)
30320 as_bad (_("hard-float conflicts with specified fpu"));
30321 break;
30322 }
30323
30324 /* Using pure-endian doubles (even if soft-float). */
30325 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
30326 flags |= F_VFP_FLOAT;
30327
30328 #if defined OBJ_ELF
30329 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
30330 flags |= EF_ARM_MAVERICK_FLOAT;
30331 break;
30332
30333 case EF_ARM_EABI_VER4:
30334 case EF_ARM_EABI_VER5:
30335 /* No additional flags to set. */
30336 break;
30337
30338 default:
30339 abort ();
30340 }
30341 #endif
30342 bfd_set_private_flags (stdoutput, flags);
30343
30344 /* We have run out flags in the COFF header to encode the
30345 status of ATPCS support, so instead we create a dummy,
30346 empty, debug section called .arm.atpcs. */
30347 if (atpcs)
30348 {
30349 asection * sec;
30350
30351 sec = bfd_make_section (stdoutput, ".arm.atpcs");
30352
30353 if (sec != NULL)
30354 {
30355 bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
30356 bfd_set_section_size (sec, 0);
30357 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
30358 }
30359 }
30360 }
30361 #endif
30362
30363 /* Record the CPU type as well. */
30364 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
30365 mach = bfd_mach_arm_iWMMXt2;
30366 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
30367 mach = bfd_mach_arm_iWMMXt;
30368 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
30369 mach = bfd_mach_arm_XScale;
30370 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
30371 mach = bfd_mach_arm_ep9312;
30372 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
30373 mach = bfd_mach_arm_5TE;
30374 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
30375 {
30376 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
30377 mach = bfd_mach_arm_5T;
30378 else
30379 mach = bfd_mach_arm_5;
30380 }
30381 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
30382 {
30383 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
30384 mach = bfd_mach_arm_4T;
30385 else
30386 mach = bfd_mach_arm_4;
30387 }
30388 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
30389 mach = bfd_mach_arm_3M;
30390 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
30391 mach = bfd_mach_arm_3;
30392 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
30393 mach = bfd_mach_arm_2a;
30394 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
30395 mach = bfd_mach_arm_2;
30396 else
30397 mach = bfd_mach_arm_unknown;
30398
30399 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
30400 }
30401
30402 /* Command line processing. */
30403
30404 /* md_parse_option
30405 Invocation line includes a switch not recognized by the base assembler.
30406 See if it's a processor-specific option.
30407
30408 This routine is somewhat complicated by the need for backwards
30409 compatibility (since older releases of gcc can't be changed).
30410 The new options try to make the interface as compatible as
30411 possible with GCC.
30412
30413 New options (supported) are:
30414
30415 -mcpu=<cpu name> Assemble for selected processor
30416 -march=<architecture name> Assemble for selected architecture
30417 -mfpu=<fpu architecture> Assemble for selected FPU.
30418 -EB/-mbig-endian Big-endian
30419 -EL/-mlittle-endian Little-endian
30420 -k Generate PIC code
30421 -mthumb Start in Thumb mode
30422 -mthumb-interwork Code supports ARM/Thumb interworking
30423
30424 -m[no-]warn-deprecated Warn about deprecated features
30425 -m[no-]warn-syms Warn when symbols match instructions
30426
30427 For now we will also provide support for:
30428
30429 -mapcs-32 32-bit Program counter
30430 -mapcs-26 26-bit Program counter
   -mapcs-float		     Floats passed in FP registers
30432 -mapcs-reentrant Reentrant code
30433 -matpcs
   (someday these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)
30436
   The remaining options are only supported for backwards compatibility.
30438 Cpu variants, the arm part is optional:
30439 -m[arm]1 Currently not supported.
30440 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
30441 -m[arm]3 Arm 3 processor
30442 -m[arm]6[xx], Arm 6 processors
30443 -m[arm]7[xx][t][[d]m] Arm 7 processors
30444 -m[arm]8[10] Arm 8 processors
30445 -m[arm]9[20][tdmi] Arm 9 processors
30446 -mstrongarm[110[0]] StrongARM processors
30447 -mxscale XScale processors
30448 -m[arm]v[2345[t[e]]] Arm architectures
30449 -mall All (except the ARM1)
30450 FP variants:
30451 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
30452 -mfpe-old (No float load/store multiples)
30453 -mvfpxd VFP Single precision
30454 -mvfp All VFP
30455 -mno-fpu Disable all floating point instructions
30456
30457 The following CPU names are recognized:
30458 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
30459 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
30460 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
30461 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
30462 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
30463 arm10t arm10e, arm1020t, arm1020e, arm10200e,
30464 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
30465
30466 */
30467
/* Short options: -m<arg> (machine selection, handled by md_parse_option)
   and -k (generate PIC code).  */
const char * md_shortopts = "m:k";

/* Long-option codes start at OPTION_MD_BASE so they cannot collide with
   the target-independent gas options.  OPTION_EB / OPTION_EL are only
   defined when the corresponding endianness is available for this
   target configuration.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
30482
/* Long options recognized directly (getopt_long format).  Options of
   the form -m<name> are instead routed through md_shortopts' "m:" and
   decoded by md_parse_option.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
30499
/* Describes a simple command-line option that sets an integer flag.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
30508
30509 struct arm_option_table arm_opts[] =
30510 {
30511 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
30512 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
30513 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
30514 &support_interwork, 1, NULL},
30515 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
30516 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
30517 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
30518 1, NULL},
30519 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
30520 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
30521 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
30522 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
30523 NULL},
30524
30525 /* These are recognized by the assembler, but have no affect on code. */
30526 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
30527 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
30528
30529 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
30530 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
30531 &warn_on_deprecated, 0, NULL},
30532 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
30533 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
30534 {NULL, NULL, NULL, 0, NULL}
30535 };
30536
/* Describes a deprecated CPU/FPU selection option: matching it sets
   *VAR (legacy_cpu or legacy_fpu) to point at VALUE and prints the
   DEPRECATED message suggesting the modern replacement.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.	*/
  const arm_feature_set value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
30544
/* Legacy option table: obsolete -m<cpu>/-m<arch>/-mfp* spellings kept
   only for backwards compatibility.  Each entry routes through
   legacy_cpu or legacy_fpu and carries a deprecation message naming
   the modern -mcpu=/-march=/-mfpu= replacement.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
30657
/* Describes one -mcpu= candidate: its base architecture feature set,
   any CPU-specific extension bits, and the FPU assumed when the user
   gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  const char * name;
  size_t name_len;
  const arm_feature_set value;
  const arm_feature_set ext;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
30671
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Convenience wrapper: N = option name, CN = canonical name (or NULL),
   V = architecture features, E = extra extension bits, DF = default FPU.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		NULL,	       ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1",		NULL,	       ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2",		NULL,	       ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250",	NULL,	       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3",		NULL,	       ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6",		NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60",		NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7",		NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m",		NULL,	       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d",		NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm",	NULL,	       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi",	NULL,	       ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70",		NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe",	NULL,	       ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t",		NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8",		NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110",	NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9",		NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920",	"ARM920T",     ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi",	NULL,	       ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526",		NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626",		NULL,	       ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	NULL,	       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e",		NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej",	"ARM926EJ-S",  ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs",	"ARM926EJ-S",  ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s",	NULL,	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0",	NULL,	       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e",	"ARM946E-S",   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0",	NULL,	       ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e",	"ARM966E-S",   ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t",	NULL,	       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi",	NULL,	       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020",	"ARM1020E",    ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t",	NULL,	       ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs",	"ARM1026EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s",	NULL,	       ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te",	NULL,	       ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js",	"ARM1136J-S",  ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s",	NULL,	       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs",	"ARM1136JF-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s",	NULL,	       ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore",	"MPCore",      ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp",	"MPCore",      ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s",	NULL,	       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s",	NULL,	       ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s",	NULL,	       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s",	NULL,	       ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5",	"Cortex-A5",   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7",	"Cortex-A7",   ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8",	"Cortex-A8",   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9",	"Cortex-A9",   ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12",	"Cortex-A12",  ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15",	"Cortex-A15",  ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17",	"Cortex-A17",  ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32",	"Cortex-A32",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35",	"Cortex-A35",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53",	"Cortex-A53",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55",    "Cortex-A55",	ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57",	"Cortex-A57",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72",	"Cortex-A72",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73",	"Cortex-A73",  ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75",    "Cortex-A75",	ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76",    "Cortex-A76",	ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76ae",    "Cortex-A76AE",   ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a77",    "Cortex-A77",	ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares",    "Ares",	ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4",	"Cortex-R4",   ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f",	"Cortex-R4F",  ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5",	"Cortex-R5",   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7",	"Cortex-R7",   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8",	"Cortex-R8",   ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52",	"Cortex-R52",  ARM_ARCH_V8R,
	      ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	      FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m35p",	"Cortex-M35P", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m33",	"Cortex-M33",  ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23",	"Cortex-M23",  ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7",	"Cortex-M7",   ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4",	"Cortex-M4",   ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3",	"Cortex-M3",   ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1",	"Cortex-M1",   ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0",	"Cortex-M0",   ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus",	"Cortex-M0+",  ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1",	"Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1",    "Neoverse N1",	 ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	NULL,	       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	NULL,	       ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2",	NULL,	       ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200",	NULL,	       ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312",	"ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",	NULL,	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL,	       ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1",	"APM X-Gene 1",	ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2",	"APM X-Gene 2",	ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Terminating entry: NAME == NULL marks the end of the table.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
31075
/* One entry in a per-architecture ("context sensitive") extension table.
   MERGE holds the feature bits enabled by "+<name>", CLEAR the bits
   disabled by "+no<name>"; either may be ARM_ARCH_NONE when only one
   direction is supported (see the ARM_ADD/ARM_REMOVE helpers below).  */
struct arm_ext_table
{
  const char *name;
  size_t name_len;
  const arm_feature_set merge;
  const arm_feature_set clear;
};
31083
/* One entry in the -march= option table: the architecture name, its core
   feature set, the FPU selected by default, and an optional pointer to a
   context-sensitive extension table used when parsing "+<ext>" suffixes
   (NULL when the architecture has no such table).  */
struct arm_arch_option_table
{
  const char *name;
  size_t name_len;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
  const struct arm_ext_table *ext_table;
};
31092
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Feature mask covering all floating-point/SIMD related bits: every
   coprocessor bit except the FPU endianness marker, plus the core FP16
   scalar and FP16 multiply-accumulate instruction bits.  Used as the
   "clear" mask of "fp" entries so that "+nofp" removes all FP state.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31102
/* Per-architecture extension tables for the pre-Armv8 architectures.
   These are referenced from arm_archs below and consulted first by
   arm_parse_extension before falling back to the legacy global table.  */

static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD),	/* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31186
/* Per-architecture extension tables for the Armv8 family (A, M and R
   profiles).  Referenced from arm_archs below.  */

static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};


static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

/* NOTE: no "sb"/"predres" entries here, unlike armv84a above — presumably
   because Armv8.5-A makes them mandatory in the base architecture; verify
   against ARM_ARCH_V8_5A's definition.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv86a_ext_table[] =
{
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* "+nomve" clears the FP variant of MVE as well; "+mve" adds only the
     integer MVE feature bit.  */
  ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE),
	   ARM_FEATURE_COPROC (FPU_MVE | FPU_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_MVE | FPU_MVE_FP | FPU_VFP_V5_SP_D16 |
			FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31313
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* Like ARM_ARCH_OPT, but also attaches a context-sensitive extension
   table (the <ext>_ext_table arrays defined above).  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		  ARM_ANY,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	  ARM_ARCH_V1,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	  ARM_ARCH_V2,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	  ARM_ARCH_V3,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	  ARM_ARCH_V3M,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	  ARM_ARCH_V4,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	  ARM_ARCH_V4xM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	  ARM_ARCH_V4T,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	  ARM_ARCH_V4TxM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	  ARM_ARCH_V5,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	  ARM_ARCH_V5T,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	  ARM_ARCH_V5TxM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te",	  ARM_ARCH_V5TE,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv5texp",	  ARM_ARCH_V5TExP,	FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej",	  ARM_ARCH_V5TEJ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6j",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6k",	  ARM_ARCH_V6K,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6z",	  ARM_ARCH_V6Z,		FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zk",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6t2",	  ARM_ARCH_V6T2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6kt2",	  ARM_ARCH_V6KT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zt2",	  ARM_ARCH_V6ZT2,	FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT ("armv6-m",	  ARM_ARCH_V6M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	  ARM_ARCH_V6SM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7",	  ARM_ARCH_V7,		FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve",	  ARM_ARCH_V7VE,	FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m",	  ARM_ARCH_V7EM,	FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base",	  ARM_ARCH_V8M_BASE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main",  ARM_ARCH_V8M_MAIN,	FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a",	  ARM_ARCH_V8A,		FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a",	  ARM_ARCH_V8_1A,	FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a",	  ARM_ARCH_V8_2A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8.3-a",	  ARM_ARCH_V8_3A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r",	  ARM_ARCH_V8R,		FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a",	  ARM_ARCH_V8_4A,	FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a",	  ARM_ARCH_V8_5A,	FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT2 ("armv8.6-a",	  ARM_ARCH_V8_6A,	FPU_ARCH_VFP, armv86a),
  ARM_ARCH_OPT ("xscale",	  ARM_ARCH_XSCALE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	  ARM_ARCH_IWMMXT,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	  ARM_ARCH_IWMMXT2,	FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_ARCH_OPT
31386
/* ISA extensions in the co-processor and main instruction set space.  */

/* One entry in the legacy (architecture-independent) extension table:
   the extension name, the feature bits that "+<name>" adds and
   "+no<name>" removes, and the architectures it may be applied to.  */
struct arm_option_extension_value_table
{
  const char *name;
  size_t name_len;
  const arm_feature_set merge_value;
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
31400
/* The following table must be in alphabetical order with a NULL last entry.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
31486
/* ISA floating-point and Advanced SIMD extensions.  */

/* One entry in the -mfpu= option table: an FPU name and the feature
   set it selects.  */
struct arm_option_fpu_value_table
{
  const char *name;
  const arm_feature_set value;
};
31493
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
31544
/* Generic name -> integer value pair, used for the -mfloat-abi= and
   -meabi= option tables below.  */
struct arm_option_value_table
{
  const char *name;
  long value;
};
31550
/* Recognised arguments for the -mfloat-abi= option.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
31558
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
31569
/* One entry in the long (string-valued) command-line option table: the
   option prefix, its --help text, the sub-option parser, and an optional
   deprecation warning.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
31577
/* Parse a "+ext1+ext2..." extension suffix STR against the base feature
   set *OPT_SET, accumulating the selected features into *EXT_SET.
   EXT_TABLE, when non-NULL, is the architecture's context-sensitive
   extension table and is consulted before the legacy global table.
   Returns TRUE on success, FALSE (after as_bad) on any parse error.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension in the list must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name, up to the next
	 '+' or the end of the string.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches to removal mode; the alphabetical scan
	 restarts from the top of the table for the removal phase.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* First try the architecture-specific extension table, if one was
	 supplied; a hit there short-circuits the legacy table.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
31745
31746 static bfd_boolean
31747 arm_parse_fp16_opt (const char *str)
31748 {
31749 if (strcasecmp (str, "ieee") == 0)
31750 fp16_format = ARM_FP16_FORMAT_IEEE;
31751 else if (strcasecmp (str, "alternative") == 0)
31752 fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
31753 else
31754 {
31755 as_bad (_("unrecognised float16 format \"%s\""), str);
31756 return FALSE;
31757 }
31758
31759 return TRUE;
31760 }
31761
31762 static bfd_boolean
31763 arm_parse_cpu (const char *str)
31764 {
31765 const struct arm_cpu_option_table *opt;
31766 const char *ext = strchr (str, '+');
31767 size_t len;
31768
31769 if (ext != NULL)
31770 len = ext - str;
31771 else
31772 len = strlen (str);
31773
31774 if (len == 0)
31775 {
31776 as_bad (_("missing cpu name `%s'"), str);
31777 return FALSE;
31778 }
31779
31780 for (opt = arm_cpus; opt->name != NULL; opt++)
31781 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31782 {
31783 mcpu_cpu_opt = &opt->value;
31784 if (mcpu_ext_opt == NULL)
31785 mcpu_ext_opt = XNEW (arm_feature_set);
31786 *mcpu_ext_opt = opt->ext;
31787 mcpu_fpu_opt = &opt->default_fpu;
31788 if (opt->canonical_name)
31789 {
31790 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
31791 strcpy (selected_cpu_name, opt->canonical_name);
31792 }
31793 else
31794 {
31795 size_t i;
31796
31797 if (len >= sizeof selected_cpu_name)
31798 len = (sizeof selected_cpu_name) - 1;
31799
31800 for (i = 0; i < len; i++)
31801 selected_cpu_name[i] = TOUPPER (opt->name[i]);
31802 selected_cpu_name[i] = 0;
31803 }
31804
31805 if (ext != NULL)
31806 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
31807
31808 return TRUE;
31809 }
31810
31811 as_bad (_("unknown cpu `%s'"), str);
31812 return FALSE;
31813 }
31814
31815 static bfd_boolean
31816 arm_parse_arch (const char *str)
31817 {
31818 const struct arm_arch_option_table *opt;
31819 const char *ext = strchr (str, '+');
31820 size_t len;
31821
31822 if (ext != NULL)
31823 len = ext - str;
31824 else
31825 len = strlen (str);
31826
31827 if (len == 0)
31828 {
31829 as_bad (_("missing architecture name `%s'"), str);
31830 return FALSE;
31831 }
31832
31833 for (opt = arm_archs; opt->name != NULL; opt++)
31834 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
31835 {
31836 march_cpu_opt = &opt->value;
31837 if (march_ext_opt == NULL)
31838 march_ext_opt = XNEW (arm_feature_set);
31839 *march_ext_opt = arm_arch_none;
31840 march_fpu_opt = &opt->default_fpu;
31841 selected_ctx_ext_table = opt->ext_table;
31842 strcpy (selected_cpu_name, opt->name);
31843
31844 if (ext != NULL)
31845 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
31846 opt->ext_table);
31847
31848 return TRUE;
31849 }
31850
31851 as_bad (_("unknown architecture `%s'\n"), str);
31852 return FALSE;
31853 }
31854
31855 static bfd_boolean
31856 arm_parse_fpu (const char * str)
31857 {
31858 const struct arm_option_fpu_value_table * opt;
31859
31860 for (opt = arm_fpus; opt->name != NULL; opt++)
31861 if (streq (opt->name, str))
31862 {
31863 mfpu_opt = &opt->value;
31864 return TRUE;
31865 }
31866
31867 as_bad (_("unknown floating point format `%s'\n"), str);
31868 return FALSE;
31869 }
31870
31871 static bfd_boolean
31872 arm_parse_float_abi (const char * str)
31873 {
31874 const struct arm_option_value_table * opt;
31875
31876 for (opt = arm_float_abis; opt->name != NULL; opt++)
31877 if (streq (opt->name, str))
31878 {
31879 mfloat_abi_opt = opt->value;
31880 return TRUE;
31881 }
31882
31883 as_bad (_("unknown floating point abi `%s'\n"), str);
31884 return FALSE;
31885 }
31886
#ifdef OBJ_ELF
/* Handle -meabi=STR: look the version name up in arm_eabis and record the
   ELF header flags.  Returns FALSE (after as_bad) when STR is unknown.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; ++entry)
    {
      if (streq (entry->name, str))
	{
	  meabi_flags = entry->value;
	  return TRUE;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
31903
31904 static bfd_boolean
31905 arm_parse_it_mode (const char * str)
31906 {
31907 bfd_boolean ret = TRUE;
31908
31909 if (streq ("arm", str))
31910 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
31911 else if (streq ("thumb", str))
31912 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
31913 else if (streq ("always", str))
31914 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
31915 else if (streq ("never", str))
31916 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
31917 else
31918 {
31919 as_bad (_("unknown implicit IT mode `%s', should be "\
31920 "arm, thumb, always, or never."), str);
31921 ret = FALSE;
31922 }
31923
31924 return ret;
31925 }
31926
31927 static bfd_boolean
31928 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
31929 {
31930 codecomposer_syntax = TRUE;
31931 arm_comment_chars[0] = ';';
31932 arm_line_separator_chars[0] = 0;
31933 return TRUE;
31934 }
31935
/* Table of the long-form ("-mfoo=...") target options.  Each entry gives
   the option name (including any trailing '='), its --help text, and the
   callback that parses the option's argument.  Scanned by
   md_parse_option and md_show_usage below.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  /* Sentinel terminating the table.  */
  {NULL, NULL, 0, NULL}
};
31962
/* Process the target-specific command line option C with argument ARG
   (NULL when the option takes none).  Returns 1 when the option was
   recognised and handled, 0 otherwise.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple on/off options that set a variable when matched.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options; these store a pointer to their feature set.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG does not include the
		 leading option character C, hence the "- 1" when skipping
		 over the rest of the option name.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
32059
/* Print a description of the target-specific command line options to FP,
   for --help output.  Short and long option tables are printed first,
   followed by the options that are not table driven.  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Entries without help text are deliberately undocumented.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
32094
32095 #ifdef OBJ_ELF
32096
/* Pairs an EABI Tag_CPU_arch build attribute value (VAL) with the feature
   set (FLAGS) of the architecture it describes.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_6A},
    /* -1 terminates the table.  */
    {-1,		      ARM_ARCH_NONE}
};
32164
32165 /* Set an attribute if it has not already been set by the user. */
32166
32167 static void
32168 aeabi_set_attribute_int (int tag, int value)
32169 {
32170 if (tag < 1
32171 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32172 || !attributes_set_explicitly[tag])
32173 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32174 }
32175
32176 static void
32177 aeabi_set_attribute_string (int tag, const char *value)
32178 {
32179 if (tag < 1
32180 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32181 || !attributes_set_explicitly[tag])
32182 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32183 }
32184
32185 /* Return whether features in the *NEEDED feature set are available via
32186 extensions for the architecture whose feature set is *ARCH_FSET. */
32187
32188 static bfd_boolean
32189 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
32190 const arm_feature_set *needed)
32191 {
32192 int i, nb_allowed_archs;
32193 arm_feature_set ext_fset;
32194 const struct arm_option_extension_value_table *opt;
32195
32196 ext_fset = arm_arch_none;
32197 for (opt = arm_extensions; opt->name != NULL; opt++)
32198 {
32199 /* Extension does not provide any feature we need. */
32200 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
32201 continue;
32202
32203 nb_allowed_archs =
32204 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
32205 for (i = 0; i < nb_allowed_archs; i++)
32206 {
32207 /* Empty entry. */
32208 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
32209 break;
32210
32211 /* Extension is available, add it. */
32212 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
32213 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
32214 }
32215 }
32216
32217 /* Can we enable all features in *needed? */
32218 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
32219 }
32220
32221 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
32222 a given architecture feature set *ARCH_EXT_FSET including extension feature
32223 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
32224 - if true, check for an exact match of the architecture modulo extensions;
32225 - otherwise, select build attribute value of the first superset
32226 architecture released so that results remains stable when new architectures
32227 are added.
32228 For -march/-mcpu=all the build attribute value of the most featureful
32229 architecture is returned. Tag_CPU_arch_profile result is returned in
32230 PROFILE. */
32231
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture-only part of the input, i.e. with the
     extension bits removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk the chronologically sorted architecture table (see cpu_arch_ver
     above) looking for a suitable entry.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32332
32333 /* Set the public EABI object attributes. */
32334
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based on the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For "armvN" style names, report the upper-cased part after
	 "armv" (e.g. "7-A"), matching the attribute convention.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Tag_ABI_FP_16bit_format, only when a non-default format was chosen.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
32546
32547 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
32548 finished and free extension feature bits which will not be used anymore. */
32549
32550 void
32551 arm_md_post_relax (void)
32552 {
32553 aeabi_set_public_attributes ();
32554 XDELETE (mcpu_ext_opt);
32555 mcpu_ext_opt = NULL;
32556 XDELETE (march_ext_opt);
32557 march_ext_opt = NULL;
32558 }
32559
32560 /* Add the default contents for the .ARM.attributes section. */
32561
32562 void
32563 arm_md_end (void)
32564 {
32565 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
32566 return;
32567
32568 aeabi_set_public_attributes ();
32569 }
32570 #endif /* OBJ_ELF */
32571
32572 /* Parse a .cpu directive. */
32573
32574 static void
32575 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
32576 {
32577 const struct arm_cpu_option_table *opt;
32578 char *name;
32579 char saved_char;
32580
32581 name = input_line_pointer;
32582 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32583 input_line_pointer++;
32584 saved_char = *input_line_pointer;
32585 *input_line_pointer = 0;
32586
32587 /* Skip the first "all" entry. */
32588 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
32589 if (streq (opt->name, name))
32590 {
32591 selected_arch = opt->value;
32592 selected_ext = opt->ext;
32593 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32594 if (opt->canonical_name)
32595 strcpy (selected_cpu_name, opt->canonical_name);
32596 else
32597 {
32598 int i;
32599 for (i = 0; opt->name[i]; i++)
32600 selected_cpu_name[i] = TOUPPER (opt->name[i]);
32601
32602 selected_cpu_name[i] = 0;
32603 }
32604 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32605
32606 *input_line_pointer = saved_char;
32607 demand_empty_rest_of_line ();
32608 return;
32609 }
32610 as_bad (_("unknown cpu `%s'"), name);
32611 *input_line_pointer = saved_char;
32612 ignore_rest_of_line ();
32613 }
32614
32615 /* Parse a .arch directive. */
32616
32617 static void
32618 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
32619 {
32620 const struct arm_arch_option_table *opt;
32621 char saved_char;
32622 char *name;
32623
32624 name = input_line_pointer;
32625 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32626 input_line_pointer++;
32627 saved_char = *input_line_pointer;
32628 *input_line_pointer = 0;
32629
32630 /* Skip the first "all" entry. */
32631 for (opt = arm_archs + 1; opt->name != NULL; opt++)
32632 if (streq (opt->name, name))
32633 {
32634 selected_arch = opt->value;
32635 selected_ext = arm_arch_none;
32636 selected_cpu = selected_arch;
32637 strcpy (selected_cpu_name, opt->name);
32638 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32639 *input_line_pointer = saved_char;
32640 demand_empty_rest_of_line ();
32641 return;
32642 }
32643
32644 as_bad (_("unknown architecture `%s'\n"), name);
32645 *input_line_pointer = saved_char;
32646 ignore_rest_of_line ();
32647 }
32648
32649 /* Parse a .object_arch directive. */
32650
32651 static void
32652 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
32653 {
32654 const struct arm_arch_option_table *opt;
32655 char saved_char;
32656 char *name;
32657
32658 name = input_line_pointer;
32659 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32660 input_line_pointer++;
32661 saved_char = *input_line_pointer;
32662 *input_line_pointer = 0;
32663
32664 /* Skip the first "all" entry. */
32665 for (opt = arm_archs + 1; opt->name != NULL; opt++)
32666 if (streq (opt->name, name))
32667 {
32668 selected_object_arch = opt->value;
32669 *input_line_pointer = saved_char;
32670 demand_empty_rest_of_line ();
32671 return;
32672 }
32673
32674 as_bad (_("unknown architecture `%s'\n"), name);
32675 *input_line_pointer = saved_char;
32676 ignore_rest_of_line ();
32677 }
32678
32679 /* Parse a .arch_extension directive. */
32680
32681 static void
32682 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
32683 {
32684 const struct arm_option_extension_value_table *opt;
32685 char saved_char;
32686 char *name;
32687 int adding_value = 1;
32688
32689 name = input_line_pointer;
32690 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32691 input_line_pointer++;
32692 saved_char = *input_line_pointer;
32693 *input_line_pointer = 0;
32694
32695 if (strlen (name) >= 2
32696 && strncmp (name, "no", 2) == 0)
32697 {
32698 adding_value = 0;
32699 name += 2;
32700 }
32701
32702 /* Check the context specific extension table */
32703 if (selected_ctx_ext_table)
32704 {
32705 const struct arm_ext_table * ext_opt;
32706 for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
32707 {
32708 if (streq (ext_opt->name, name))
32709 {
32710 if (adding_value)
32711 {
32712 if (ARM_FEATURE_ZERO (ext_opt->merge))
32713 /* TODO: Option not supported. When we remove the
32714 legacy table this case should error out. */
32715 continue;
32716 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32717 ext_opt->merge);
32718 }
32719 else
32720 ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
32721
32722 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32723 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32724 *input_line_pointer = saved_char;
32725 demand_empty_rest_of_line ();
32726 return;
32727 }
32728 }
32729 }
32730
32731 for (opt = arm_extensions; opt->name != NULL; opt++)
32732 if (streq (opt->name, name))
32733 {
32734 int i, nb_allowed_archs =
32735 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
32736 for (i = 0; i < nb_allowed_archs; i++)
32737 {
32738 /* Empty entry. */
32739 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
32740 continue;
32741 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
32742 break;
32743 }
32744
32745 if (i == nb_allowed_archs)
32746 {
32747 as_bad (_("architectural extension `%s' is not allowed for the "
32748 "current base architecture"), name);
32749 break;
32750 }
32751
32752 if (adding_value)
32753 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
32754 opt->merge_value);
32755 else
32756 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
32757
32758 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
32759 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32760 *input_line_pointer = saved_char;
32761 demand_empty_rest_of_line ();
32762 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
32763 on this return so that duplicate extensions (extensions with the
32764 same name as a previous extension in the list) are not considered
32765 for command-line parsing. */
32766 return;
32767 }
32768
32769 if (opt->name == NULL)
32770 as_bad (_("unknown architecture extension `%s'\n"), name);
32771
32772 *input_line_pointer = saved_char;
32773 ignore_rest_of_line ();
32774 }
32775
32776 /* Parse a .fpu directive. */
32777
32778 static void
32779 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
32780 {
32781 const struct arm_option_fpu_value_table *opt;
32782 char saved_char;
32783 char *name;
32784
32785 name = input_line_pointer;
32786 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
32787 input_line_pointer++;
32788 saved_char = *input_line_pointer;
32789 *input_line_pointer = 0;
32790
32791 for (opt = arm_fpus; opt->name != NULL; opt++)
32792 if (streq (opt->name, name))
32793 {
32794 selected_fpu = opt->value;
32795 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
32796 #ifndef CPU_DEFAULT
32797 if (no_cpu_selected ())
32798 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
32799 else
32800 #endif
32801 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
32802 *input_line_pointer = saved_char;
32803 demand_empty_rest_of_line ();
32804 return;
32805 }
32806
32807 as_bad (_("unknown floating point format `%s'\n"), name);
32808 *input_line_pointer = saved_char;
32809 ignore_rest_of_line ();
32810 }
32811
32812 /* Copy symbol information. */
32813
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the target-specific per-symbol flag word from SRC to DEST
     (ARM_GET_FLAG is an lvalue macro over the symbol's TC field).  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
32819
32820 #ifdef OBJ_ELF
32821 /* Given a symbolic attribute NAME, return the proper integer value.
32822 Returns -1 if the attribute is not known. */
32823
32824 int
32825 arm_convert_symbolic_attribute (const char *name)
32826 {
32827 static const struct
32828 {
32829 const char * name;
32830 const int tag;
32831 }
32832 attribute_table[] =
32833 {
32834 /* When you modify this table you should
32835 also modify the list in doc/c-arm.texi. */
32836 #define T(tag) {#tag, tag}
32837 T (Tag_CPU_raw_name),
32838 T (Tag_CPU_name),
32839 T (Tag_CPU_arch),
32840 T (Tag_CPU_arch_profile),
32841 T (Tag_ARM_ISA_use),
32842 T (Tag_THUMB_ISA_use),
32843 T (Tag_FP_arch),
32844 T (Tag_VFP_arch),
32845 T (Tag_WMMX_arch),
32846 T (Tag_Advanced_SIMD_arch),
32847 T (Tag_PCS_config),
32848 T (Tag_ABI_PCS_R9_use),
32849 T (Tag_ABI_PCS_RW_data),
32850 T (Tag_ABI_PCS_RO_data),
32851 T (Tag_ABI_PCS_GOT_use),
32852 T (Tag_ABI_PCS_wchar_t),
32853 T (Tag_ABI_FP_rounding),
32854 T (Tag_ABI_FP_denormal),
32855 T (Tag_ABI_FP_exceptions),
32856 T (Tag_ABI_FP_user_exceptions),
32857 T (Tag_ABI_FP_number_model),
32858 T (Tag_ABI_align_needed),
32859 T (Tag_ABI_align8_needed),
32860 T (Tag_ABI_align_preserved),
32861 T (Tag_ABI_align8_preserved),
32862 T (Tag_ABI_enum_size),
32863 T (Tag_ABI_HardFP_use),
32864 T (Tag_ABI_VFP_args),
32865 T (Tag_ABI_WMMX_args),
32866 T (Tag_ABI_optimization_goals),
32867 T (Tag_ABI_FP_optimization_goals),
32868 T (Tag_compatibility),
32869 T (Tag_CPU_unaligned_access),
32870 T (Tag_FP_HP_extension),
32871 T (Tag_VFP_HP_extension),
32872 T (Tag_ABI_FP_16bit_format),
32873 T (Tag_MPextension_use),
32874 T (Tag_DIV_use),
32875 T (Tag_nodefaults),
32876 T (Tag_also_compatible_with),
32877 T (Tag_conformance),
32878 T (Tag_T2EE_use),
32879 T (Tag_Virtualization_use),
32880 T (Tag_DSP_extension),
32881 T (Tag_MVE_arch),
32882 /* We deliberately do not include Tag_MPextension_use_legacy. */
32883 #undef T
32884 };
32885 unsigned int i;
32886
32887 if (name == NULL)
32888 return -1;
32889
32890 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
32891 if (streq (name, attribute_table[i].name))
32892 return attribute_table[i].tag;
32893
32894 return -1;
32895 }
32896
32897 /* Apply sym value for relocations only in the case that they are for
32898 local symbols in the same segment as the fixup and you have the
32899 respective architectural feature for blx and simple switches. */
32900
32901 int
32902 arm_apply_sym_value (struct fix * fixP, segT this_seg)
32903 {
32904 if (fixP->fx_addsy
32905 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
32906 /* PR 17444: If the local symbol is in a different section then a reloc
32907 will always be generated for it, so applying the symbol value now
32908 will result in a double offset being stored in the relocation. */
32909 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
32910 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
32911 {
32912 switch (fixP->fx_r_type)
32913 {
32914 case BFD_RELOC_ARM_PCREL_BLX:
32915 case BFD_RELOC_THUMB_PCREL_BRANCH23:
32916 if (ARM_IS_FUNC (fixP->fx_addsy))
32917 return 1;
32918 break;
32919
32920 case BFD_RELOC_ARM_PCREL_CALL:
32921 case BFD_RELOC_THUMB_PCREL_BLX:
32922 if (THUMB_IS_FUNC (fixP->fx_addsy))
32923 return 1;
32924 break;
32925
32926 default:
32927 break;
32928 }
32929
32930 }
32931 return 0;
32932 }
32933 #endif /* OBJ_ELF */
This page took 1.022157 seconds and 4 git commands to generate.